Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-7.7/20/
2 tests failed. FAILED: org.apache.solr.cloud.hdfs.StressHdfsTest.test Error Message: There are still nodes recoverying - waited for 330 seconds Stack Trace: java.lang.AssertionError: There are still nodes recoverying - waited for 330 seconds at __randomizedtesting.SeedInfo.seed([8DB2B1576E8C69DC:5E68E8DC0700424]:0) at org.junit.Assert.fail(Assert.java:88) at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:195) at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:143) at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:138) at org.apache.solr.cloud.AbstractFullDistribZkTestBase.waitForRecoveriesToFinish(AbstractFullDistribZkTestBase.java:1008) at org.apache.solr.cloud.hdfs.StressHdfsTest.test(StressHdfsTest.java:117) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1750) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:938) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:974) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:988) at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:1075) at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:1047) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:947) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:832) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:883) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:894) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.lang.Thread.run(Thread.java:748) FAILED: org.apache.solr.uninverting.TestDocTermOrdsUninvertLimit.testTriggerUnInvertLimit Error Message: GC overhead limit exceeded Stack Trace: java.lang.OutOfMemoryError: GC overhead limit exceeded at __randomizedtesting.SeedInfo.seed([8DB2B1576E8C69DC:BE009993633BB36B]:0) at org.apache.lucene.codecs.memory.DirectPostingsFormat$DirectField.<init>(DirectPostingsFormat.java:458) at org.apache.lucene.codecs.memory.DirectPostingsFormat$DirectFields.<init>(DirectPostingsFormat.java:129) at org.apache.lucene.codecs.memory.DirectPostingsFormat.fieldsProducer(DirectPostingsFormat.java:113) at org.apache.lucene.codecs.perfield.PerFieldPostingsFormat$FieldsReader.<init>(PerFieldPostingsFormat.java:283) at org.apache.lucene.codecs.perfield.PerFieldPostingsFormat.fieldsProducer(PerFieldPostingsFormat.java:363) at org.apache.lucene.index.SegmentCoreReaders.<init>(SegmentCoreReaders.java:113) at 
org.apache.lucene.index.SegmentReader.<init>(SegmentReader.java:83) at org.apache.lucene.index.ReadersAndUpdates.getReader(ReadersAndUpdates.java:172) at org.apache.lucene.index.IndexWriter.mergeMiddle(IndexWriter.java:4627) at org.apache.lucene.index.IndexWriter.merge(IndexWriter.java:4077) at org.apache.lucene.index.SerialMergeScheduler.merge(SerialMergeScheduler.java:40) at org.apache.lucene.index.IndexWriter.maybeMerge(IndexWriter.java:2177) at org.apache.lucene.index.IndexWriter.processEvents(IndexWriter.java:5139) at org.apache.lucene.index.IndexWriter.updateDocument(IndexWriter.java:1619) at org.apache.lucene.index.IndexWriter.addDocument(IndexWriter.java:1235) at org.apache.lucene.index.RandomIndexWriter.addDocument(RandomIndexWriter.java:189) at org.apache.solr.uninverting.TestDocTermOrdsUninvertLimit.testTriggerUnInvertLimit(TestDocTermOrdsUninvertLimit.java:66) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1750) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:938) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:974) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:988) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) Build Log: [...truncated 13716 lines...] [junit4] Suite: org.apache.solr.uninverting.TestDocTermOrdsUninvertLimit [junit4] 2> NOTE: download the large Jenkins line-docs file by running 'ant get-jenkins-line-docs' in the lucene directory. [junit4] 2> NOTE: reproduce with: ant test -Dtestcase=TestDocTermOrdsUninvertLimit -Dtests.method=testTriggerUnInvertLimit -Dtests.seed=8DB2B1576E8C69DC -Dtests.multiplier=2 -Dtests.nightly=true -Dtests.slow=true -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/test-data/enwiki.random.lines.txt -Dtests.locale=ar-IQ -Dtests.timezone=America/Rosario -Dtests.asserts=true -Dtests.file.encoding=US-ASCII [junit4] ERROR 291s J0 | TestDocTermOrdsUninvertLimit.testTriggerUnInvertLimit <<< [junit4] > Throwable #1: java.lang.OutOfMemoryError: GC overhead limit exceeded [junit4] > at __randomizedtesting.SeedInfo.seed([8DB2B1576E8C69DC:BE009993633BB36B]:0) [junit4] > at org.apache.lucene.codecs.memory.DirectPostingsFormat$DirectField.<init>(DirectPostingsFormat.java:458) [junit4] > at org.apache.lucene.codecs.memory.DirectPostingsFormat$DirectFields.<init>(DirectPostingsFormat.java:129) [junit4] > at org.apache.lucene.codecs.memory.DirectPostingsFormat.fieldsProducer(DirectPostingsFormat.java:113) [junit4] > at org.apache.lucene.codecs.perfield.PerFieldPostingsFormat$FieldsReader.<init>(PerFieldPostingsFormat.java:283) [junit4] > at org.apache.lucene.codecs.perfield.PerFieldPostingsFormat.fieldsProducer(PerFieldPostingsFormat.java:363) [junit4] > at org.apache.lucene.index.SegmentCoreReaders.<init>(SegmentCoreReaders.java:113) [junit4] > at org.apache.lucene.index.SegmentReader.<init>(SegmentReader.java:83) [junit4] > at 
org.apache.lucene.index.ReadersAndUpdates.getReader(ReadersAndUpdates.java:172) [junit4] > at org.apache.lucene.index.IndexWriter.mergeMiddle(IndexWriter.java:4627) [junit4] > at org.apache.lucene.index.IndexWriter.merge(IndexWriter.java:4077) [junit4] > at org.apache.lucene.index.SerialMergeScheduler.merge(SerialMergeScheduler.java:40) [junit4] > at org.apache.lucene.index.IndexWriter.maybeMerge(IndexWriter.java:2177) [junit4] > at org.apache.lucene.index.IndexWriter.processEvents(IndexWriter.java:5139) [junit4] > at org.apache.lucene.index.IndexWriter.updateDocument(IndexWriter.java:1619) [junit4] > at org.apache.lucene.index.IndexWriter.addDocument(IndexWriter.java:1235) [junit4] > at org.apache.lucene.index.RandomIndexWriter.addDocument(RandomIndexWriter.java:189) [junit4] > at org.apache.solr.uninverting.TestDocTermOrdsUninvertLimit.testTriggerUnInvertLimit(TestDocTermOrdsUninvertLimit.java:66) [junit4] 2> NOTE: test params are: codec=Asserting(Lucene70): {field=PostingsFormat(name=Direct)}, docValues:{}, maxPointsInLeafNode=1885, maxMBSortInHeap=6.382781388324267, sim=RandomSimilarity(queryNorm=false): {field=DFR I(ne)B1}, locale=ar-IQ, timezone=America/Rosario [junit4] 2> NOTE: Linux 4.4.0-112-generic amd64/Oracle Corporation 1.8.0_191 (64-bit)/cpus=4,threads=1,free=245978896,total=477626368 [junit4] 2> NOTE: All tests run in this JVM: [TestHdfsUpdateLog, AliasIntegrationTest, DistributedTermsComponentTest, BlockCacheTest, TestElisionMultitermQuery, ResponseLogComponentTest, TestIndexSearcher, ZkNodePropsTest, AlternateDirectoryTest, TestLRUCache, TestXmlQParserPlugin, LegacyCloudClusterPropTest, TestInitParams, AssignTest, ReplaceNodeTest, LeaderFailureAfterFreshStartTest, CustomCollectionTest, NodeMarkersRegistrationTest, SystemInfoHandlerTest, TestSizeLimitedDistributedMap, SolrGraphiteReporterTest, MetricTriggerIntegrationTest, TestTolerantUpdateProcessorCloud, DistributedVersionInfoTest, MergeStrategyTest, CreateCollectionCleanupTest, 
TestCloudPseudoReturnFields, TestSubQueryTransformerCrossCore, IndexSchemaTest, SolrXmlInZkTest, DocValuesMissingTest, TestStressLucene, SuggestComponentContextFilterQueryTest, ShardRoutingCustomTest, CurrencyRangeFacetCloudTest, TestDocTermOrdsUninvertLimit] [junit4] Completed [127/842 (1!)] on J0 in 291.57s, 1 test, 1 error <<< FAILURES! [...truncated 1231 lines...] [junit4] Suite: org.apache.solr.cloud.hdfs.StressHdfsTest [junit4] 2> 4116399 INFO (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom [junit4] 2> Creating dataDir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/init-core-data-001 [junit4] 2> 4116400 INFO (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true) w/NUMERIC_DOCVALUES_SYSPROP=false [junit4] 2> 4116415 INFO (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (false) via: @org.apache.solr.SolrTestCaseJ4$SuppressSSL(bugUrl=https://issues.apache.org/jira/browse/SOLR-5776) [junit4] 2> 4116415 INFO (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: / [junit4] 1> Formatting using clusterid: testClusterID [junit4] 2> 4116628 WARN (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.a.h.m.i.MetricsConfig Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties [junit4] 2> 4116705 WARN (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j [junit4] 2> 4116707 INFO (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.m.log jetty-6.1.26 [junit4] 2> 4116798 INFO 
(SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.m.log Extract jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/hdfs to ./temp/Jetty_lucene2.us.west_apache_org_46393_hdfs____xbblmd/webapp [junit4] 2> 4118317 INFO (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.m.log Started HttpServer2$SelectChannelConnectorWithSafeStartup@lucene2-us-west.apache.org:46393 [junit4] 2> 4119648 WARN (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j [junit4] 2> 4119683 INFO (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.m.log jetty-6.1.26 [junit4] 2> 4119810 INFO (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.m.log Extract jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode to ./temp/Jetty_localhost_37837_datanode____115jfj/webapp [junit4] 2> 4120982 INFO (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.m.log Started HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:37837 [junit4] 2> 4121852 WARN (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j [junit4] 2> 4121853 INFO (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.m.log jetty-6.1.26 [junit4] 2> 4122173 INFO (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.m.log Extract jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode to ./temp/Jetty_localhost_40602_datanode____ayryjn/webapp [junit4] 2> 4123463 ERROR (DataNode: [[[DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/tempDir-001/hdfsBaseDir/data/data1/, 
[DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/tempDir-001/hdfsBaseDir/data/data2/]] heartbeating to lucene2-us-west.apache.org/127.0.0.1:44066) [ ] o.a.h.h.s.d.DirectoryScanner dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1 ms/sec. Assuming default value of 1000 [junit4] 2> 4124044 INFO (Block report processor) [ ] BlockStateChange BLOCK* processReport 0x9222eedca3508d: from storage DS-ecdaa019-2d25-4830-8fb9-8e037a314267 node DatanodeRegistration(127.0.0.1:45334, datanodeUuid=da90ac70-e627-48d4-bb5e-631aaddb776e, infoPort=34080, infoSecurePort=0, ipcPort=41230, storageInfo=lv=-56;cid=testClusterID;nsid=593142609;c=0), blocks: 0, hasStaleStorage: true, processing time: 0 msecs [junit4] 2> 4124044 INFO (Block report processor) [ ] BlockStateChange BLOCK* processReport 0x9222eedca3508d: from storage DS-bd758590-56ed-4c34-b2f9-5dd6d9ef1a19 node DatanodeRegistration(127.0.0.1:45334, datanodeUuid=da90ac70-e627-48d4-bb5e-631aaddb776e, infoPort=34080, infoSecurePort=0, ipcPort=41230, storageInfo=lv=-56;cid=testClusterID;nsid=593142609;c=0), blocks: 0, hasStaleStorage: false, processing time: 0 msecs [junit4] 2> 4124474 INFO (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.m.log Started HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:40602 [junit4] 2> 4124893 ERROR (DataNode: [[[DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/tempDir-001/hdfsBaseDir/data/data3/, [DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/tempDir-001/hdfsBaseDir/data/data4/]] heartbeating to lucene2-us-west.apache.org/127.0.0.1:44066) [ ] o.a.h.h.s.d.DirectoryScanner 
dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1 ms/sec. Assuming default value of 1000 [junit4] 2> 4124945 INFO (Block report processor) [ ] BlockStateChange BLOCK* processReport 0x9222ef18729a0c: from storage DS-48e37673-cd7c-4c80-8bb3-40ac7410a17c node DatanodeRegistration(127.0.0.1:38038, datanodeUuid=89dabc1c-d59e-4018-be86-851f660b9d6b, infoPort=34093, infoSecurePort=0, ipcPort=38089, storageInfo=lv=-56;cid=testClusterID;nsid=593142609;c=0), blocks: 0, hasStaleStorage: true, processing time: 0 msecs [junit4] 2> 4124945 INFO (Block report processor) [ ] BlockStateChange BLOCK* processReport 0x9222ef18729a0c: from storage DS-0c710ac6-4b85-4e7d-b80d-0f85def6d517 node DatanodeRegistration(127.0.0.1:38038, datanodeUuid=89dabc1c-d59e-4018-be86-851f660b9d6b, infoPort=34093, infoSecurePort=0, ipcPort=38089, storageInfo=lv=-56;cid=testClusterID;nsid=593142609;c=0), blocks: 0, hasStaleStorage: false, processing time: 0 msecs [junit4] 2> 4125567 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER [junit4] 2> 4125567 INFO (ZkTestServer Run Thread) [ ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0 [junit4] 2> 4125567 INFO (ZkTestServer Run Thread) [ ] o.a.s.c.ZkTestServer Starting server [junit4] 2> 4125677 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.ZkTestServer start zk server on port:43520 [junit4] 2> 4125677 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.ZkTestServer parse host and port list: 127.0.0.1:43520 [junit4] 2> 4125677 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.ZkTestServer connecting to 127.0.0.1 43520 [junit4] 2> 4125700 INFO (zkConnectionManagerCallback-14911-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4125756 INFO (zkConnectionManagerCallback-14913-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4125757 INFO 
(TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml to /configs/conf1/solrconfig.xml [junit4] 2> 4125758 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/core/src/test-files/solr/collection1/conf/schema.xml to /configs/conf1/schema.xml [junit4] 2> 4125759 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml to /configs/conf1/solrconfig.snippet.randomindexconfig.xml [junit4] 2> 4125760 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/core/src/test-files/solr/collection1/conf/stopwords.txt to /configs/conf1/stopwords.txt [junit4] 2> 4125761 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/core/src/test-files/solr/collection1/conf/protwords.txt to /configs/conf1/protwords.txt [junit4] 2> 4125761 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/core/src/test-files/solr/collection1/conf/currency.xml to /configs/conf1/currency.xml [junit4] 2> 4125774 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml to /configs/conf1/enumsConfig.xml [junit4] 2> 4125775 INFO 
(TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json to /configs/conf1/open-exchange-rates.json [junit4] 2> 4125776 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt to /configs/conf1/mapping-ISOLatin1Accent.txt [junit4] 2> 4125777 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt to /configs/conf1/old_synonyms.txt [junit4] 2> 4125778 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/core/src/test-files/solr/collection1/conf/synonyms.txt to /configs/conf1/synonyms.txt [junit4] 2> 4125791 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.AbstractFullDistribZkTestBase Will use NRT replicas unless explicitly asked otherwise [junit4] 2> 4126345 WARN (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.e.j.s.AbstractConnector Ignoring deprecated socket close linger time [junit4] 2> 4126346 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.s.e.JettySolrRunner Start Jetty (original configured port=0) [junit4] 2> 4126346 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 1 ... 
[junit4] 2> 4126346 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.e.j.s.Server jetty-9.4.14.v20181114; built: 2018-11-14T21:20:31.478Z; git: c4550056e785fb5665914545889f21dc136ad9e6; jvm 1.8.0_191-b12 [junit4] 2> 4126347 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0 [junit4] 2> 4126347 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.e.j.s.session No SessionScavenger set, using defaults [junit4] 2> 4126347 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.e.j.s.session node0 Scavenging every 600000ms [junit4] 2> 4126347 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@51901a3c{/,null,AVAILABLE} [junit4] 2> 4126348 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.e.j.s.AbstractConnector Started ServerConnector@4af97cce{HTTP/1.1,[http/1.1]}{127.0.0.1:35087} [junit4] 2> 4126348 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.e.j.s.Server Started @4126384ms [junit4] 2> 4126348 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=hdfs://lucene2-us-west.apache.org:44066/hdfs__lucene2-us-west.apache.org_44066__home_jenkins_jenkins-slave_workspace_Lucene-Solr-NightlyTests-7.7_checkout_solr_build_solr-core_test_J2_temp_solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001_tempDir-002_control_data, replicaType=NRT, hostContext=/, hostPort=35087, coreRootDirectory=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/../../../../../../../../../../../home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/control-001/cores} [junit4] 2> 4126348 ERROR (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option 
solr.log.dir. Logging may be missing or incomplete. [junit4] 2> 4126348 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory [junit4] 2> 4126348 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version 7.7.2 [junit4] 2> 4126348 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null [junit4] 2> 4126348 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null [junit4] 2> 4126348 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2019-05-25T14:59:17.368Z [junit4] 2> 4126362 INFO (zkConnectionManagerCallback-14915-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4126363 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper) [junit4] 2> 4126363 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/control-001/solr.xml [junit4] 2> 4126379 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored [junit4] 2> 4126379 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored [junit4] 2> 4126380 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580, but no JMX reporters were configured - 
adding default JMX reporter. [junit4] 2> 4127146 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.h.c.HttpShardHandlerFactory Host whitelist initialized: WhitelistHostChecker [whitelistHosts=null, whitelistHostCheckingEnabled=false] [junit4] 2> 4127167 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:43520/solr [junit4] 2> 4127180 INFO (zkConnectionManagerCallback-14919-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4127181 WARN (NIOServerCxn.Factory:0.0.0.0/0.0.0.0:0) [ ] o.a.z.s.NIOServerCnxn Unable to read additional data from client sessionid 0x10993c354610003, likely client has closed socket [junit4] 2> 4127182 INFO (zkConnectionManagerCallback-14921-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4127450 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [n:127.0.0.1:35087_ ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:35087_ [junit4] 2> 4127450 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [n:127.0.0.1:35087_ ] o.a.s.c.Overseer Overseer (id=74753335971872772-127.0.0.1:35087_-n_0000000000) starting [junit4] 2> 4127470 INFO (zkConnectionManagerCallback-14928-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4127488 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [n:127.0.0.1:35087_ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:43520/solr ready [junit4] 2> 4127491 INFO (OverseerStateUpdate-74753335971872772-127.0.0.1:35087_-n_0000000000) [n:127.0.0.1:35087_ ] o.a.s.c.Overseer Starting to work on the main queue : 127.0.0.1:35087_ [junit4] 2> 4127493 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [n:127.0.0.1:35087_ ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:35087_ [junit4] 2> 4127512 INFO (zkCallback-14927-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(0) -> (1) [junit4] 2> 4127540 INFO (zkCallback-14920-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1) [junit4] 2> 4127607 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [n:127.0.0.1:35087_ ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory. [junit4] 2> 4127695 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [n:127.0.0.1:35087_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580 [junit4] 2> 4127732 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [n:127.0.0.1:35087_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580 [junit4] 2> 4127732 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [n:127.0.0.1:35087_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580 [junit4] 2> 4127733 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [n:127.0.0.1:35087_ ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/../../../../../../../../../../../home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/control-001/cores [junit4] 2> 4127887 INFO (zkConnectionManagerCallback-14934-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4127888 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(0) -> (1) [junit4] 2> 4127889 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:43520/solr ready [junit4] 2> 4127907 INFO (qtp1116618231-45928) [n:127.0.0.1:35087_ ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:35087_&wt=javabin&version=2 and sendToOCPQueue=true [junit4] 2> 4127922 INFO (OverseerThreadFactory-8325-thread-1-processing-n:127.0.0.1:35087_) [n:127.0.0.1:35087_ ] o.a.s.c.a.c.CreateCollectionCmd Create collection control_collection [junit4] 2> 4128028 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ x:control_collection_shard1_replica_n1] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT [junit4] 2> 4128028 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ x:control_collection_shard1_replica_n1] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores [junit4] 2> 4129167 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.7.2 [junit4] 2> 4129261 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.s.IndexSchema [control_collection_shard1_replica_n1] Schema name=test [junit4] 2> 4130021 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id [junit4] 2> 4130145 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.CoreContainer Creating 
SolrCore 'control_collection_shard1_replica_n1' using configuration from collection control_collection, trusted=true [junit4] 2> 4130162 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.control_collection.shard1.replica_n1' (registry 'solr.core.control_collection.shard1.replica_n1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580 [junit4] 2> 4130163 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.HdfsDirectoryFactory solr.hdfs.home=hdfs://lucene2-us-west.apache.org:44066/solr_hdfs_home [junit4] 2> 4130163 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.HdfsDirectoryFactory Solr Kerberos Authentication disabled [junit4] 2> 4130163 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.SolrCore [[control_collection_shard1_replica_n1] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/control-001/cores/control_collection_shard1_replica_n1], dataDir=[hdfs://lucene2-us-west.apache.org:44066/solr_hdfs_home/control_collection/core_node2/data/] [junit4] 2> 4130164 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.HdfsDirectoryFactory creating directory factory for path hdfs://lucene2-us-west.apache.org:44066/solr_hdfs_home/control_collection/core_node2/data/snapshot_metadata [junit4] 2> 4130200 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.HdfsDirectoryFactory Number of slabs of block cache [1] with direct memory allocation set 
to [true] [junit4] 2> 4130200 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.HdfsDirectoryFactory Block cache target memory usage, slab size of [8388608] will allocate [1] slabs and use ~[8388608] bytes [junit4] 2> 4130200 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.HdfsDirectoryFactory Creating new single instance HDFS BlockCache [junit4] 2> 4130257 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.s.b.BlockDirectory Block cache on write is disabled [junit4] 2> 4130258 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.HdfsDirectoryFactory creating directory factory for path hdfs://lucene2-us-west.apache.org:44066/solr_hdfs_home/control_collection/core_node2/data [junit4] 2> 4130328 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.HdfsDirectoryFactory creating directory factory for path hdfs://lucene2-us-west.apache.org:44066/solr_hdfs_home/control_collection/core_node2/data/index [junit4] 2> 4130346 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.HdfsDirectoryFactory Number of slabs of block cache [1] with direct memory allocation set to [true] [junit4] 2> 4130346 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.HdfsDirectoryFactory Block cache target memory usage, slab size of [8388608] will allocate [1] slabs and use ~[8388608] bytes [junit4] 2> 4130346 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.HdfsDirectoryFactory Creating new single instance HDFS 
BlockCache [junit4] 2> 4130389 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.s.b.BlockDirectory Block cache on write is disabled [junit4] 2> 4130389 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=19, maxMergeAtOnceExplicit=15, maxMergedSegmentMB=12.8525390625, floorSegmentMB=0.783203125, forceMergeDeletesPctAllowed=0.1856908761435505, segmentsPerTier=42.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.3258451717769235, deletesPctAllowed=33.82998121166678 [junit4] 2> 4130490 INFO (Block report processor) [ ] BlockStateChange BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:45334 is added to blk_1073741825_1001{UCState=COMMITTED, truncateBlock=null, primaryNodeIndex=-1, replicas=[ReplicaUC[[DISK]DS-48e37673-cd7c-4c80-8bb3-40ac7410a17c:NORMAL:127.0.0.1:38038|RBW], ReplicaUC[[DISK]DS-bd758590-56ed-4c34-b2f9-5dd6d9ef1a19:NORMAL:127.0.0.1:45334|RBW]]} size 69 [junit4] 2> 4130490 INFO (Block report processor) [ ] BlockStateChange BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:38038 is added to blk_1073741825_1001 size 69 [junit4] 2> 4131022 WARN (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}} [junit4] 2> 4131274 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.HdfsUpdateLog [junit4] 2> 4131275 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ 
c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536 [junit4] 2> 4131275 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.u.HdfsUpdateLog Initializing HdfsUpdateLog: tlogDfsReplication=2 [junit4] 2> 4131335 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.u.CommitTracker Hard AutoCommit: disabled [junit4] 2> 4131335 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.u.CommitTracker Soft AutoCommit: disabled [junit4] 2> 4131349 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: minMergeSize=1000, mergeFactor=49, maxMergeSize=9223372036854775807, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0] [junit4] 2> 4131423 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.s.SolrIndexSearcher Opening [Searcher@5de16850[control_collection_shard1_replica_n1] main] [junit4] 2> 4131424 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1 [junit4] 2> 4131425 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1 [junit4] 
2> 4131425 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms. [junit4] 2> 4131458 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1634516454581731328 [junit4] 2> 4131461 INFO (searcherExecutor-8330-thread-1-processing-n:127.0.0.1:35087_ x:control_collection_shard1_replica_n1 c:control_collection s:shard1) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.SolrCore [control_collection_shard1_replica_n1] Registered new searcher Searcher@5de16850[control_collection_shard1_replica_n1] main{ExitableDirectoryReader(UninvertingDirectoryReader())} [junit4] 2> 4131498 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.ZkShardTerms Successful update of terms at /collections/control_collection/terms/shard1 to Terms{values={core_node2=0}, version=0} [junit4] 2> 4131498 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContextBase make sure parent is created /collections/control_collection/leaders/shard1 [junit4] 2> 4131512 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue. 
[junit4] 2> 4131512 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync [junit4] 2> 4131512 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:35087/control_collection_shard1_replica_n1/ [junit4] 2> 4131513 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me [junit4] 2> 4131513 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.SyncStrategy http://127.0.0.1:35087/control_collection_shard1_replica_n1/ has no replicas [junit4] 2> 4131513 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR [junit4] 2> 4131513 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContextBase Creating leader registration node /collections/control_collection/leaders/shard1/leader after winning as /collections/control_collection/leader_elect/shard1/election/74753335971872772-core_node2-n_0000000000 [junit4] 2> 4131622 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:35087/control_collection_shard1_replica_n1/ shard1 [junit4] 2> 4131642 INFO (zkCallback-14920-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/control_collection/state.json] for collection [control_collection] 
has occurred - updating... (live nodes size: [1]) [junit4] 2> 4131643 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.ZkController I am the leader, no recovery necessary [junit4] 2> 4131653 INFO (zkCallback-14920-thread-2) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/control_collection/state.json] for collection [control_collection] has occurred - updating... (live nodes size: [1]) [junit4] 2> 4131677 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT} status=0 QTime=3649 [junit4] 2> 4131783 INFO (qtp1116618231-45928) [n:127.0.0.1:35087_ ] o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 45 seconds. Check all shard replicas [junit4] 2> 4131832 INFO (zkCallback-14920-thread-2) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/control_collection/state.json] for collection [control_collection] has occurred - updating... (live nodes size: [1]) [junit4] 2> 4131832 INFO (zkCallback-14920-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/control_collection/state.json] for collection [control_collection] has occurred - updating... 
(live nodes size: [1]) [junit4] 2> 4131833 INFO (qtp1116618231-45928) [n:127.0.0.1:35087_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:35087_&wt=javabin&version=2} status=0 QTime=3926 [junit4] 2> 4131833 INFO (zkCallback-14920-thread-3) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/control_collection/state.json] for collection [control_collection] has occurred - updating... (live nodes size: [1]) [junit4] 2> 4131904 INFO (zkConnectionManagerCallback-14939-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4131905 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1) [junit4] 2> 4131906 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:43520/solr ready [junit4] 2> 4131906 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.ChaosMonkey monkey: init - expire sessions:false cause connection loss:false [junit4] 2> 4131907 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=&stateFormat=2&wt=javabin&version=2 and sendToOCPQueue=true [junit4] 2> 4131962 INFO (OverseerCollectionConfigSetProcessor-74753335971872772-127.0.0.1:35087_-n_0000000000) [n:127.0.0.1:35087_ ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000000 doesn't exist. 
Requestor may have disconnected from ZooKeeper [junit4] 2> 4131962 INFO (OverseerThreadFactory-8325-thread-2-processing-n:127.0.0.1:35087_) [n:127.0.0.1:35087_ ] o.a.s.c.a.c.CreateCollectionCmd Create collection collection1 [junit4] 2> 4132184 WARN (OverseerThreadFactory-8325-thread-2-processing-n:127.0.0.1:35087_) [n:127.0.0.1:35087_ ] o.a.s.c.a.c.CreateCollectionCmd It is unusual to create a collection (collection1) without cores. [junit4] 2> 4132186 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ ] o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 45 seconds. Check all shard replicas [junit4] 2> 4132186 INFO (qtp1116618231-45930) [n:127.0.0.1:35087_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=&stateFormat=2&wt=javabin&version=2} status=0 QTime=279 [junit4] 2> 4132196 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.AbstractFullDistribZkTestBase Creating jetty instances pullReplicaCount=0 numOtherReplicas=7 [junit4] 2> 4132699 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/shard-1-001 of type NRT [junit4] 2> 4132718 WARN (closeThreadPool-14940-thread-1) [ ] o.e.j.s.AbstractConnector Ignoring deprecated socket close linger time [junit4] 2> 4132719 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.s.e.JettySolrRunner Start Jetty (original configured port=0) [junit4] 2> 4132719 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 1 ... 
[junit4] 2> 4132719 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.Server jetty-9.4.14.v20181114; built: 2018-11-14T21:20:31.478Z; git: c4550056e785fb5665914545889f21dc136ad9e6; jvm 1.8.0_191-b12 [junit4] 2> 4132844 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0 [junit4] 2> 4132844 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.session No SessionScavenger set, using defaults [junit4] 2> 4132844 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.session node0 Scavenging every 660000ms [junit4] 2> 4132847 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@2fded126{/,null,AVAILABLE} [junit4] 2> 4132849 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.AbstractConnector Started ServerConnector@13034481{HTTP/1.1,[http/1.1]}{127.0.0.1:44517} [junit4] 2> 4132849 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.Server Started @4132886ms [junit4] 2> 4132849 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=hdfs://lucene2-us-west.apache.org:44066/hdfs__lucene2-us-west.apache.org_44066__home_jenkins_jenkins-slave_workspace_Lucene-Solr-NightlyTests-7.7_checkout_solr_build_solr-core_test_J2_temp_solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001_tempDir-002_jetty1, replicaType=NRT, solrconfig=solrconfig.xml, hostContext=/, hostPort=44517, coreRootDirectory=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/shard-1-001/cores} [junit4] 2> 4132850 ERROR (closeThreadPool-14940-thread-1) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete. 
[junit4] 2> 4132850 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory [junit4] 2> 4132850 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr? version 7.7.2 [junit4] 2> 4132850 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null [junit4] 2> 4132850 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null [junit4] 2> 4132850 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2019-05-25T14:59:23.870Z [junit4] 2> 4132933 INFO (zkConnectionManagerCallback-14942-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4132990 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper) [junit4] 2> 4132990 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/shard-1-001/solr.xml [junit4] 2> 4132993 WARN (NIOServerCxn.Factory:0.0.0.0/0.0.0.0:0) [ ] o.a.z.s.NIOServerCnxn Unable to read additional data from client sessionid 0x10993c354610008, likely client has closed socket [junit4] 2> 4133010 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored [junit4] 2> 4133010 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored [junit4] 2> 4133012 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580, but no JMX reporters were configured - adding default JMX reporter. 
[junit4] 2> 4133400 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.h.c.HttpShardHandlerFactory Host whitelist initialized: WhitelistHostChecker [whitelistHosts=null, whitelistHostCheckingEnabled=false] [junit4] 2> 4133425 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:43520/solr [junit4] 2> 4133426 INFO (zkConnectionManagerCallback-14946-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4133513 INFO (zkConnectionManagerCallback-14948-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4133586 INFO (closeThreadPool-14940-thread-1) [n:127.0.0.1:44517_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1) [junit4] 2> 4133601 INFO (closeThreadPool-14940-thread-1) [n:127.0.0.1:44517_ ] o.a.s.c.ZkController Publish node=127.0.0.1:44517_ as DOWN [junit4] 2> 4133602 INFO (closeThreadPool-14940-thread-1) [n:127.0.0.1:44517_ ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores [junit4] 2> 4133602 INFO (closeThreadPool-14940-thread-1) [n:127.0.0.1:44517_ ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:44517_ [junit4] 2> 4133603 INFO (zkCallback-14920-thread-3) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2) [junit4] 2> 4133622 INFO (zkCallback-14938-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2) [junit4] 2> 4133638 INFO (zkCallback-14927-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2) [junit4] 2> 4133638 INFO (zkCallback-14947-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2) [junit4] 2> 4133733 INFO (zkConnectionManagerCallback-14955-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4133734 INFO (closeThreadPool-14940-thread-1) [n:127.0.0.1:44517_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(0) -> (2) [junit4] 2> 4133768 INFO (closeThreadPool-14940-thread-1) [n:127.0.0.1:44517_ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:43520/solr ready [junit4] 2> 4133768 INFO (closeThreadPool-14940-thread-1) [n:127.0.0.1:44517_ ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory. [junit4] 2> 4133866 INFO (closeThreadPool-14940-thread-1) [n:127.0.0.1:44517_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580 [junit4] 2> 4133904 INFO (closeThreadPool-14940-thread-1) [n:127.0.0.1:44517_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580 [junit4] 2> 4133904 INFO (closeThreadPool-14940-thread-1) [n:127.0.0.1:44517_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580 [junit4] 2> 4133922 INFO (closeThreadPool-14940-thread-1) [n:127.0.0.1:44517_ ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/shard-1-001/cores [junit4] 2> 4134063 INFO (OverseerCollectionConfigSetProcessor-74753335971872772-127.0.0.1:35087_-n_0000000000) [n:127.0.0.1:35087_ ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000002 doesn't exist. 
Requestor may have disconnected from ZooKeeper [junit4] 2> 4134239 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 2 in directory /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/shard-2-001 of type NRT [junit4] 2> 4134249 WARN (closeThreadPool-14940-thread-1) [ ] o.e.j.s.AbstractConnector Ignoring deprecated socket close linger time [junit4] 2> 4134250 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.s.e.JettySolrRunner Start Jetty (original configured port=0) [junit4] 2> 4134250 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 1 ... [junit4] 2> 4134250 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.Server jetty-9.4.14.v20181114; built: 2018-11-14T21:20:31.478Z; git: c4550056e785fb5665914545889f21dc136ad9e6; jvm 1.8.0_191-b12 [junit4] 2> 4134270 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0 [junit4] 2> 4134270 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.session No SessionScavenger set, using defaults [junit4] 2> 4134270 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.session node0 Scavenging every 660000ms [junit4] 2> 4134270 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@2f2b59b{/,null,AVAILABLE} [junit4] 2> 4134271 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.AbstractConnector Started ServerConnector@46441701{HTTP/1.1,[http/1.1]}{127.0.0.1:36152} [junit4] 2> 4134271 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.Server Started @4134307ms [junit4] 2> 4134271 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=hdfs://lucene2-us-west.apache.org:44066/hdfs__lucene2-us-west.apache.org_44066__home_jenkins_jenkins-slave_workspace_Lucene-Solr-NightlyTests-7.7_checkout_solr_build_solr-core_test_J2_temp_solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001_tempDir-002_jetty2, solrconfig=solrconfig.xml, hostContext=/, hostPort=36152, coreRootDirectory=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/shard-2-001/cores} [junit4] 2> 4134271 ERROR (closeThreadPool-14940-thread-1) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete. [junit4] 2> 4134271 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory [junit4] 2> 4134271 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr? version 7.7.2 [junit4] 2> 4134271 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null [junit4] 2> 4134271 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null [junit4] 2> 4134271 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2019-05-25T14:59:25.291Z [junit4] 2> 4134273 INFO (zkConnectionManagerCallback-14958-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4134306 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper) [junit4] 2> 4134306 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/shard-2-001/solr.xml [junit4] 2> 4134325 INFO 
(closeThreadPool-14940-thread-1) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored [junit4] 2> 4134325 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored [junit4] 2> 4134344 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580, but no JMX reporters were configured - adding default JMX reporter. [junit4] 2> 4134929 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.h.c.HttpShardHandlerFactory Host whitelist initialized: WhitelistHostChecker [whitelistHosts=null, whitelistHostCheckingEnabled=false] [junit4] 2> 4135064 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:43520/solr [junit4] 2> 4135066 INFO (zkConnectionManagerCallback-14962-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4135117 INFO (zkConnectionManagerCallback-14964-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4135161 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2) [junit4] 2> 4135164 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.ZkController Publish node=127.0.0.1:36152_ as DOWN [junit4] 2> 4135181 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores [junit4] 2> 4135181 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:36152_ [junit4] 2> 4135183 INFO (zkCallback-14920-thread-3) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3) [junit4] 2> 4135183 INFO (zkCallback-14938-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(2) -> (3) [junit4] 2> 4135197 INFO (zkCallback-14947-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3) [junit4] 2> 4135205 INFO (zkCallback-14927-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3) [junit4] 2> 4135241 INFO (zkCallback-14963-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3) [junit4] 2> 4135335 INFO (zkCallback-14954-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3) [junit4] 2> 4135404 INFO (zkConnectionManagerCallback-14971-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4135436 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (3) [junit4] 2> 4135437 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:43520/solr ready [junit4] 2> 4135509 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory. 
[junit4] 2> 4135579 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580 [junit4] 2> 4135621 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580 [junit4] 2> 4135621 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580 [junit4] 2> 4135636 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/shard-2-001/cores [junit4] 2> 4135851 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 3 in directory /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/shard-3-001 of type NRT [junit4] 2> 4135878 WARN (closeThreadPool-14940-thread-2) [ ] o.e.j.s.AbstractConnector Ignoring deprecated socket close linger time [junit4] 2> 4135878 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.c.s.e.JettySolrRunner Start Jetty (original configured port=0) [junit4] 2> 4135878 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 1 ... 
[junit4] 2> 4135878 INFO (closeThreadPool-14940-thread-2) [ ] o.e.j.s.Server jetty-9.4.14.v20181114; built: 2018-11-14T21:20:31.478Z; git: c4550056e785fb5665914545889f21dc136ad9e6; jvm 1.8.0_191-b12 [junit4] 2> 4135898 INFO (closeThreadPool-14940-thread-2) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0 [junit4] 2> 4135898 INFO (closeThreadPool-14940-thread-2) [ ] o.e.j.s.session No SessionScavenger set, using defaults [junit4] 2> 4135898 INFO (closeThreadPool-14940-thread-2) [ ] o.e.j.s.session node0 Scavenging every 660000ms [junit4] 2> 4135899 INFO (closeThreadPool-14940-thread-2) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@42983086{/,null,AVAILABLE} [junit4] 2> 4135899 INFO (closeThreadPool-14940-thread-2) [ ] o.e.j.s.AbstractConnector Started ServerConnector@1ddbb34{HTTP/1.1,[http/1.1]}{127.0.0.1:40360} [junit4] 2> 4135899 INFO (closeThreadPool-14940-thread-2) [ ] o.e.j.s.Server Started @4135936ms [junit4] 2> 4135899 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=hdfs://lucene2-us-west.apache.org:44066/hdfs__lucene2-us-west.apache.org_44066__home_jenkins_jenkins-slave_workspace_Lucene-Solr-NightlyTests-7.7_checkout_solr_build_solr-core_test_J2_temp_solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001_tempDir-002_jetty3, replicaType=NRT, solrconfig=solrconfig.xml, hostContext=/, hostPort=40360, coreRootDirectory=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/shard-3-001/cores} [junit4] 2> 4135899 ERROR (closeThreadPool-14940-thread-2) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete. 
[junit4] 2> 4135899 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory [junit4] 2> 4135899 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr? version 7.7.2 [junit4] 2> 4135899 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null [junit4] 2> 4135900 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null [junit4] 2> 4135900 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2019-05-25T14:59:26.920Z [junit4] 2> 4135901 INFO (zkConnectionManagerCallback-14974-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4135934 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper) [junit4] 2> 4135934 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/shard-3-001/solr.xml [junit4] 2> 4135942 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored [junit4] 2> 4135942 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored [junit4] 2> 4135975 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580, but no JMX reporters were configured - adding default JMX reporter. 
[junit4] 2> 4136706 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.h.c.HttpShardHandlerFactory Host whitelist initialized: WhitelistHostChecker [whitelistHosts=null, whitelistHostCheckingEnabled=false] [junit4] 2> 4136733 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:43520/solr [junit4] 2> 4136785 INFO (zkConnectionManagerCallback-14978-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4136846 INFO (zkConnectionManagerCallback-14980-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4136861 INFO (closeThreadPool-14940-thread-2) [n:127.0.0.1:40360_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (3) [junit4] 2> 4136969 INFO (closeThreadPool-14940-thread-2) [n:127.0.0.1:40360_ ] o.a.s.c.ZkController Publish node=127.0.0.1:40360_ as DOWN [junit4] 2> 4136970 INFO (closeThreadPool-14940-thread-2) [n:127.0.0.1:40360_ ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores [junit4] 2> 4136970 INFO (closeThreadPool-14940-thread-2) [n:127.0.0.1:40360_ ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:40360_ [junit4] 2> 4136971 INFO (zkCallback-14927-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4) [junit4] 2> 4136992 INFO (zkCallback-14938-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4) [junit4] 2> 4136992 INFO (zkCallback-14947-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4) [junit4] 2> 4137004 INFO (zkCallback-14954-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4) [junit4] 2> 4137016 INFO (zkCallback-14963-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4) [junit4] 2> 4137016 INFO (zkCallback-14920-thread-2) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(3) -> (4) [junit4] 2> 4137024 INFO (zkCallback-14979-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4) [junit4] 2> 4137048 INFO (zkCallback-14970-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4) [junit4] 2> 4137198 INFO (zkConnectionManagerCallback-14987-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4137207 INFO (closeThreadPool-14940-thread-2) [n:127.0.0.1:40360_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (4) [junit4] 2> 4137208 INFO (closeThreadPool-14940-thread-2) [n:127.0.0.1:40360_ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:43520/solr ready [junit4] 2> 4137243 INFO (closeThreadPool-14940-thread-2) [n:127.0.0.1:40360_ ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory. [junit4] 2> 4137262 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 4 in directory /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/shard-4-001 of type NRT [junit4] 2> 4137262 WARN (closeThreadPool-14940-thread-1) [ ] o.e.j.s.AbstractConnector Ignoring deprecated socket close linger time [junit4] 2> 4137263 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.s.e.JettySolrRunner Start Jetty (original configured port=0) [junit4] 2> 4137263 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 1 ... 
[junit4] 2> 4137263 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.Server jetty-9.4.14.v20181114; built: 2018-11-14T21:20:31.478Z; git: c4550056e785fb5665914545889f21dc136ad9e6; jvm 1.8.0_191-b12 [junit4] 2> 4137316 INFO (closeThreadPool-14940-thread-2) [n:127.0.0.1:40360_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580 [junit4] 2> 4137354 INFO (closeThreadPool-14940-thread-2) [n:127.0.0.1:40360_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580 [junit4] 2> 4137354 INFO (closeThreadPool-14940-thread-2) [n:127.0.0.1:40360_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580 [junit4] 2> 4137368 INFO (closeThreadPool-14940-thread-2) [n:127.0.0.1:40360_ ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/shard-3-001/cores [junit4] 2> 4137462 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0 [junit4] 2> 4137462 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.session No SessionScavenger set, using defaults [junit4] 2> 4137462 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.session node0 Scavenging every 660000ms [junit4] 2> 4137535 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@634629fe{/,null,AVAILABLE} [junit4] 2> 4137652 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.AbstractConnector Started ServerConnector@1b304189{HTTP/1.1,[http/1.1]}{127.0.0.1:35200} [junit4] 2> 4137652 INFO (closeThreadPool-14940-thread-1) [ ] o.e.j.s.Server Started @4137689ms [junit4] 2> 4137652 INFO 
(closeThreadPool-14940-thread-1) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=hdfs://lucene2-us-west.apache.org:44066/hdfs__lucene2-us-west.apache.org_44066__home_jenkins_jenkins-slave_workspace_Lucene-Solr-NightlyTests-7.7_checkout_solr_build_solr-core_test_J2_temp_solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001_tempDir-002_jetty4, replicaType=NRT, solrconfig=solrconfig.xml, hostContext=/, hostPort=35200, coreRootDirectory=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/shard-4-001/cores} [junit4] 2> 4137652 ERROR (closeThreadPool-14940-thread-1) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete. [junit4] 2> 4137652 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory [junit4] 2> 4137652 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr? 
version 7.7.2 [junit4] 2> 4137653 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null [junit4] 2> 4137653 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null [junit4] 2> 4137653 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2019-05-25T14:59:28.673Z [junit4] 2> 4137725 INFO (zkConnectionManagerCallback-14990-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4137762 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper) [junit4] 2> 4137762 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/shard-4-001/solr.xml [junit4] 2> 4137765 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored [junit4] 2> 4137765 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored [junit4] 2> 4137783 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580, but no JMX reporters were configured - adding default JMX reporter. 
[junit4] 2> 4138294 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.h.c.HttpShardHandlerFactory Host whitelist initialized: WhitelistHostChecker [whitelistHosts=null, whitelistHostCheckingEnabled=false] [junit4] 2> 4138330 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:43520/solr [junit4] 2> 4138397 INFO (zkConnectionManagerCallback-14994-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4138563 INFO (zkConnectionManagerCallback-14996-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4138586 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (4) [junit4] 2> 4138605 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.ZkController Publish node=127.0.0.1:35200_ as DOWN [junit4] 2> 4138605 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores [junit4] 2> 4138605 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:35200_ [junit4] 2> 4138607 INFO (zkCallback-14954-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (5) [junit4] 2> 4138607 INFO (zkCallback-14970-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (5) [junit4] 2> 4138607 INFO (zkCallback-14963-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (5) [junit4] 2> 4138607 INFO (zkCallback-14947-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (5) [junit4] 2> 4138607 INFO (zkCallback-14979-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (5) [junit4] 2> 4138628 INFO (zkCallback-14927-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(4) -> (5) [junit4] 2> 4138628 INFO (zkCallback-14938-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (5) [junit4] 2> 4138628 INFO (zkCallback-14920-thread-3) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (5) [junit4] 2> 4138629 INFO (zkCallback-14986-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (5) [junit4] 2> 4138646 INFO (zkCallback-14995-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (5) [junit4] 2> 4138678 INFO (TEST-StressHdfsTest.test-seed#[8DB2B1576E8C69DC]) [ ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 5 in directory /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/shard-5-001 of type NRT [junit4] 2> 4138678 WARN (closeThreadPool-14940-thread-2) [ ] o.e.j.s.AbstractConnector Ignoring deprecated socket close linger time [junit4] 2> 4138678 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.c.s.e.JettySolrRunner Start Jetty (original configured port=0) [junit4] 2> 4138678 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 1 ... 
[junit4] 2> 4138678 INFO (closeThreadPool-14940-thread-2) [ ] o.e.j.s.Server jetty-9.4.14.v20181114; built: 2018-11-14T21:20:31.478Z; git: c4550056e785fb5665914545889f21dc136ad9e6; jvm 1.8.0_191-b12 [junit4] 2> 4138790 INFO (closeThreadPool-14940-thread-2) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0 [junit4] 2> 4138790 INFO (closeThreadPool-14940-thread-2) [ ] o.e.j.s.session No SessionScavenger set, using defaults [junit4] 2> 4138790 INFO (closeThreadPool-14940-thread-2) [ ] o.e.j.s.session node0 Scavenging every 660000ms [junit4] 2> 4138847 INFO (closeThreadPool-14940-thread-2) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@8821b9e{/,null,AVAILABLE} [junit4] 2> 4138883 INFO (zkConnectionManagerCallback-15003-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4138884 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (5) [junit4] 2> 4138885 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:43520/solr ready [junit4] 2> 4138895 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory. 
[junit4] 2> 4138899 INFO (closeThreadPool-14940-thread-2) [ ] o.e.j.s.AbstractConnector Started ServerConnector@4465353c{HTTP/1.1,[http/1.1]}{127.0.0.1:41635} [junit4] 2> 4138899 INFO (closeThreadPool-14940-thread-2) [ ] o.e.j.s.Server Started @4138936ms [junit4] 2> 4138899 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=hdfs://lucene2-us-west.apache.org:44066/hdfs__lucene2-us-west.apache.org_44066__home_jenkins_jenkins-slave_workspace_Lucene-Solr-NightlyTests-7.7_checkout_solr_build_solr-core_test_J2_temp_solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001_tempDir-002_jetty5, solrconfig=solrconfig.xml, hostContext=/, hostPort=41635, coreRootDirectory=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/shard-5-001/cores} [junit4] 2> 4138952 ERROR (closeThreadPool-14940-thread-2) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete. [junit4] 2> 4138952 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory [junit4] 2> 4138952 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr? 
version 7.7.2 [junit4] 2> 4138952 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null [junit4] 2> 4138952 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null [junit4] 2> 4138952 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2019-05-25T14:59:29.972Z [junit4] 2> 4139006 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580 [junit4] 2> 4139037 INFO (zkConnectionManagerCallback-15006-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4139039 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper) [junit4] 2> 4139039 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/shard-5-001/solr.xml [junit4] 2> 4139059 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored [junit4] 2> 4139059 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored [junit4] 2> 4139061 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580, but no JMX reporters were configured - adding default JMX reporter. 
[junit4] 2> 4139095 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580 [junit4] 2> 4139095 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c88580 [junit4] 2> 4139096 INFO (closeThreadPool-14940-thread-1) [ ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/shard-4-001/cores [junit4] 2> 4139636 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.h.c.HttpShardHandlerFactory Host whitelist initialized: WhitelistHostChecker [whitelistHosts=null, whitelistHostCheckingEnabled=false] [junit4] 2> 4139711 INFO (closeThreadPool-14940-thread-2) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:43520/solr [junit4] 2> 4139732 INFO (zkConnectionManagerCallback-15010-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4139791 INFO (zkConnectionManagerCallback-15012-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 4 [...truncated too long message...] 
7s J2 | StressHdfsTest.test <<< [junit4] > Throwable #1: java.lang.AssertionError: There are still nodes recoverying - waited for 330 seconds [junit4] > at __randomizedtesting.SeedInfo.seed([8DB2B1576E8C69DC:5E68E8DC0700424]:0) [junit4] > at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:195) [junit4] > at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:143) [junit4] > at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:138) [junit4] > at org.apache.solr.cloud.AbstractFullDistribZkTestBase.waitForRecoveriesToFinish(AbstractFullDistribZkTestBase.java:1008) [junit4] > at org.apache.solr.cloud.hdfs.StressHdfsTest.test(StressHdfsTest.java:117) [junit4] > at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:1075) [junit4] > at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:1047) [junit4] > at java.lang.Thread.run(Thread.java:748) [junit4] 2> 4552925 WARN (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.a.h.h.s.d.DirectoryScanner DirectoryScanner: shutdown has been called [junit4] 2> 4552935 INFO (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.m.log Stopped HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:0 [junit4] 2> 4553037 WARN (DataNode: [[[DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/tempDir-001/hdfsBaseDir/data/data3/, [DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/tempDir-001/hdfsBaseDir/data/data4/]] heartbeating to 
lucene2-us-west.apache.org/127.0.0.1:44066) [ ] o.a.h.h.s.d.IncrementalBlockReportManager IncrementalBlockReportManager interrupted [junit4] 2> 4553037 WARN (DataNode: [[[DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/tempDir-001/hdfsBaseDir/data/data3/, [DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/tempDir-001/hdfsBaseDir/data/data4/]] heartbeating to lucene2-us-west.apache.org/127.0.0.1:44066) [ ] o.a.h.h.s.d.DataNode Ending block pool service for: Block pool BP-1388971160-127.0.0.1-1558796347568 (Datanode Uuid 89dabc1c-d59e-4018-be86-851f660b9d6b) service to lucene2-us-west.apache.org/127.0.0.1:44066 [junit4] 2> 4553039 WARN (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.a.h.h.s.d.DirectoryScanner DirectoryScanner: shutdown has been called [junit4] 2> 4553081 INFO (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.m.log Stopped HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:0 [junit4] 2> 4553183 WARN (DataNode: [[[DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/tempDir-001/hdfsBaseDir/data/data1/, [DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/tempDir-001/hdfsBaseDir/data/data2/]] heartbeating to lucene2-us-west.apache.org/127.0.0.1:44066) [ ] o.a.h.h.s.d.IncrementalBlockReportManager IncrementalBlockReportManager interrupted [junit4] 2> 4553183 WARN (DataNode: 
[[[DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/tempDir-001/hdfsBaseDir/data/data1/, [DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001/tempDir-001/hdfsBaseDir/data/data2/]] heartbeating to lucene2-us-west.apache.org/127.0.0.1:44066) [ ] o.a.h.h.s.d.DataNode Ending block pool service for: Block pool BP-1388971160-127.0.0.1-1558796347568 (Datanode Uuid da90ac70-e627-48d4-bb5e-631aaddb776e) service to lucene2-us-west.apache.org/127.0.0.1:44066 [junit4] 2> 4553214 INFO (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.m.log Stopped HttpServer2$selectchannelconnectorwithsafestar...@lucene2-us-west.apache.org:0 [junit4] 2> 4553341 INFO (SUITE-StressHdfsTest-seed#[8DB2B1576E8C69DC]-worker) [ ] o.a.s.s.h.HdfsDirectory Closing hdfs directory hdfs://lucene2-us-west.apache.org:44066/solr_hdfs_home/delete_data_dir/core_node2/data [junit4] 2> NOTE: leaving temporary files on disk at: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.StressHdfsTest_8DB2B1576E8C69DC-001 [junit4] 2> May 25, 2019 3:06:24 PM com.carrotsearch.randomizedtesting.ThreadLeakControl checkThreadLeaks [junit4] 2> WARNING: Will linger awaiting termination of 32 leaked thread(s). 
[junit4] 2> NOTE: test params are: codec=Asserting(Lucene70), sim=RandomSimilarity(queryNorm=true): {}, locale=sr-Latn-ME, timezone=NZ-CHAT [junit4] 2> NOTE: Linux 4.4.0-112-generic amd64/Oracle Corporation 1.8.0_191 (64-bit)/cpus=4,threads=4,free=302034976,total=524288000 [junit4] 2> NOTE: All tests run in this JVM: [TestCustomDocTransformer, AddSchemaFieldsUpdateProcessorFactoryTest, TestSimLargeCluster, CacheHeaderTest, TestReloadDeadlock, TestNumericRangeQuery64, TestDocSet, ExitableDirectoryReaderTest, TestXmlQParser, TestDistributedStatsComponentCardinality, RangeFacetCloudTest, TermVectorComponentDistributedTest, TestCloudJSONFacetSKG, CdcrBidirectionalTest, IndexSchemaRuntimeFieldTest, TestSQLHandler, DistributedSuggestComponentTest, SmileWriterTest, TestSurroundQueryParser, NoCacheHeaderTest, IgnoreCommitOptimizeUpdateProcessorFactoryTest, TestSerializedLuceneMatchVersion, ExecutePlanActionTest, PreAnalyzedUpdateProcessorTest, CopyFieldTest, TestImpersonationWithHadoopAuth, HdfsRestartWhileUpdatingTest, TestCorePropertiesReload, SolrShardReporterTest, NodeAddedTriggerIntegrationTest, TestCollectionsAPIViaSolrCloudCluster, DistributedExpandComponentTest, TestManagedSchema, TestPhraseSuggestions, Tagger2Test, RecoveryAfterSoftCommitTest, TestPivotHelperCode, TemplateUpdateProcessorTest, SuggesterTest, SpatialRPTFieldTypeTest, DeleteInactiveReplicaTest, SolrGangliaReporterTest, SignificantTermsQParserPluginTest, DocValuesTest, TestSimpleQParserPlugin, SchemaApiFailureTest, TestCloudSearcherWarming, URLClassifyProcessorTest, TestBlobHandler, TestCoreBackup, SpellPossibilityIteratorTest, TestRecovery, ClassificationUpdateProcessorTest, TestLegacyTerms, TestCloudDeleteByQuery, TestFieldCacheVsDocValues, TestJavabinTupleStreamParser, ShardsWhitelistTest, TestComplexPhraseQParserPlugin, IndexBasedSpellCheckerTest, TestReplicaProperties, CoreMergeIndexesAdminHandlerTest, TestAddFieldRealTimeGet, TestSolrCloudWithDelegationTokens, TestCloudPivotFacet, 
SimpleFacetsTest, UpdateLogTest, TestBulkSchemaConcurrent, MetricsHistoryWithAuthIntegrationTest, TestNestedUpdateProcessor, TestDistributedGrouping, SolrJmxReporterTest, ReplicaListTransformerTest, CdcrWithNodesRestartsTest, DistributedFacetPivotLongTailTest, ShardSplitTest, TestSolrJ, TestBulkSchemaAPI, TestHashQParserPlugin, TestFastLRUCache, MultiTermTest, CdcrReplicationHandlerTest, TestDocumentBuilder, ConcurrentDeleteAndCreateCollectionTest, TestWordDelimiterFilterFactory, ZkStateWriterTest, SharedFSAutoReplicaFailoverTest, HdfsTlogReplayBufferedWhileIndexingTest, TestTestInjection, TestManagedResource, TestSolrXml, ConfigureRecoveryStrategyTest, MaxSizeAutoCommitTest, OutOfBoxZkACLAndCredentialsProvidersTest, SearchRateTriggerIntegrationTest, TestDocTermOrds, BadIndexSchemaTest, HdfsNNFailoverTest, TestNoOpRegenerator, V2StandaloneTest, ReplaceNodeNoTargetTest, TestShardHandlerFactory, SyncSliceTest, TestDistribIDF, TestManagedSynonymFilterFactory, JavabinLoaderTest, TestStreamBody, VMParamsZkACLAndCredentialsProvidersTest, TestDeleteCollectionOnDownNodes, SolrIndexMetricsTest, TestTlogReplica, TestRuleBasedAuthorizationPlugin, TestSchemaResource, TestSolrDeletionPolicy1, NodeLostTriggerTest, StressHdfsTest] [junit4] Completed [454/842 (2!)] on J2 in 439.72s, 1 test, 1 failure <<< FAILURES! [...truncated 1330 lines...] [junit4] JVM J0: stdout was not empty, see: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/solr/build/solr-core/test/temp/junit4-J0-20190525_135030_8862205831055026419832.sysout [junit4] >>> JVM J0 emitted unexpected output (verbatim) ---- [junit4] java.lang.OutOfMemoryError: GC overhead limit exceeded [junit4] Dumping heap to /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/heapdumps/java_pid21955.hprof ... [junit4] Heap dump file created [462752484 bytes in 8.087 secs] [junit4] <<< JVM J0: EOF ---- [...truncated 8924 lines...] 
BUILD FAILED /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/build.xml:651: The following error occurred while executing this line: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.7/checkout/build.xml:585: Some of the tests produced a heap dump, but did not fail. Maybe a suppressed OutOfMemoryError? Dumps created: * java_pid21955.hprof Total time: 449 minutes 46 seconds Build step 'Invoke Ant' marked build as failure Archiving artifacts Recording test results Email was triggered for: Failure - Any Sending email for trigger: Failure - Any
--------------------------------------------------------------------- To unsubscribe, e-mail: dev-unsubscribe@lucene.apache.org For additional commands, e-mail: dev-help@lucene.apache.org