[JENKINS] Lucene-Solr-Tests-trunk-Java7 - Build # 4901 - Still Failing
Build: https://builds.apache.org/job/Lucene-Solr-Tests-trunk-Java7/4901/ 1 tests failed. REGRESSION: org.apache.solr.cloud.ChaosMonkeySafeLeaderTest.testDistribSearch Error Message: expected:<0> but was:<1> Stack Trace: java.lang.AssertionError: expected:<0> but was:<1> at __randomizedtesting.SeedInfo.seed([F3FB642B77797948:721DEA3300261974]:0) at org.junit.Assert.fail(Assert.java:93) at org.junit.Assert.failNotEquals(Assert.java:647) at org.junit.Assert.assertEquals(Assert.java:128) at org.junit.Assert.assertEquals(Assert.java:472) at org.junit.Assert.assertEquals(Assert.java:456) at org.apache.solr.cloud.ChaosMonkeySafeLeaderTest.doTest(ChaosMonkeySafeLeaderTest.java:153) at org.apache.solr.BaseDistributedSearchTestCase.testDistribSearch(BaseDistributedSearchTestCase.java:869) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836) at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738) at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:43) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:55) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$
[JENKINS] Lucene-Solr-5.x-Windows (64bit/jdk1.7.0_67) - Build # 4248 - Failure!
Build: http://jenkins.thetaphi.de/job/Lucene-Solr-5.x-Windows/4248/ Java: 64bit/jdk1.7.0_67 -XX:-UseCompressedOops -XX:+UseG1GC 1 tests failed. REGRESSION: org.apache.solr.cloud.TestCloudPivotFacet.testDistribSearch Error Message: init query failed: {main(facet=true&facet.pivot=pivot_tf%2Cpivot_l&facet.pivot=pivot_td%2Cdense_pivot_ti%2Cpivot_dt&facet.limit=11&facet.offset=7),extra(rows=0&q=*%3A*&fq=id%3A%5B*+TO+260%5D)}: No live SolrServers available to handle this request:[http://127.0.0.1:61656/sn_z/uo/collection1, http://127.0.0.1:61666/sn_z/uo/collection1, http://127.0.0.1:61637/sn_z/uo/collection1, http://127.0.0.1:61646/sn_z/uo/collection1] Stack Trace: java.lang.RuntimeException: init query failed: {main(facet=true&facet.pivot=pivot_tf%2Cpivot_l&facet.pivot=pivot_td%2Cdense_pivot_ti%2Cpivot_dt&facet.limit=11&facet.offset=7),extra(rows=0&q=*%3A*&fq=id%3A%5B*+TO+260%5D)}: No live SolrServers available to handle this request:[http://127.0.0.1:61656/sn_z/uo/collection1, http://127.0.0.1:61666/sn_z/uo/collection1, http://127.0.0.1:61637/sn_z/uo/collection1, http://127.0.0.1:61646/sn_z/uo/collection1] at __randomizedtesting.SeedInfo.seed([EB4E61E19A7464B9:6AA8EFF9ED2B0485]:0) at org.apache.solr.cloud.TestCloudPivotFacet.assertPivotCountsAreCorrect(TestCloudPivotFacet.java:223) at org.apache.solr.cloud.TestCloudPivotFacet.doTest(TestCloudPivotFacet.java:197) at org.apache.solr.BaseDistributedSearchTestCase.testDistribSearch(BaseDistributedSearchTestCase.java:869) at sun.reflect.GeneratedMethodAccessor36.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836) at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738) at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772) at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
[jira] [Commented] (SOLR-6249) Schema API changes return success before all cores are updated
[ https://issues.apache.org/jira/browse/SOLR-6249?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14157709#comment-14157709 ] Noble Paul commented on SOLR-6249: -- Do we need to make a copy here in ZkController? The only synchronization is on reconnectListeners, and it's OK if adding new listeners has to wait.
{code:java}
OnReconnect[] toNotify = null;
synchronized (reconnectListeners) {
  toNotify = reconnectListeners.toArray(new OnReconnect[0]);
}
{code}
The rest seems fine. > Schema API changes return success before all cores are updated > -- > > Key: SOLR-6249 > URL: https://issues.apache.org/jira/browse/SOLR-6249 > Project: Solr > Issue Type: Improvement > Components: Schema and Analysis, SolrCloud > Reporter: Gregory Chanan > Assignee: Timothy Potter > Attachments: SOLR-6249.patch, SOLR-6249.patch, SOLR-6249.patch, SOLR-6249_reconnect.patch, SOLR-6249_reconnect.patch > > > See SOLR-6137 for more details. > The basic issue is that Schema API changes return success when the first core is updated, but other cores asynchronously read the updated schema from ZooKeeper. > So a client application could make a Schema API change and then index some documents based on the new schema that may fail on other nodes. > Possible fixes: > 1) Make the Schema API calls synchronous > 2) Give the client some ability to track the state of the schema. They can already do this to a certain extent by checking the Schema API on all the replicas and verifying that the field has been added, though this is pretty cumbersome. Maybe it makes more sense to do this sort of thing at the collection level, i.e. Schema API changes return the zk version to the client. We add an API to return the current zk version. On a replica, if the zk version is >= the version the client has, the client knows that replica has at least seen the schema change. We could also provide an API to do the distribution and checking across the different replicas of the collection so that clients don't need to do that themselves. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
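For context, the trade-off being discussed is the usual snapshot-then-notify pattern: copy the listener list while holding the lock, then invoke the callbacks outside it. The sketch below only illustrates that pattern and is not Solr's actual ZkController; the surrounding class and method names are hypothetical, with OnReconnect standing in for Solr's listener interface.
{code:java}
import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-in for Solr's OnReconnect listener interface.
interface OnReconnect {
  void command();
}

class ReconnectNotifier {
  private final List<OnReconnect> reconnectListeners = new ArrayList<>();

  void addOnReconnectListener(OnReconnect listener) {
    synchronized (reconnectListeners) {
      reconnectListeners.add(listener);
    }
  }

  // Snapshot under the lock, notify outside it: threads adding listeners only
  // block for the duration of the array copy, never for the callbacks themselves.
  void notifyReconnect() {
    OnReconnect[] toNotify;
    synchronized (reconnectListeners) {
      toNotify = reconnectListeners.toArray(new OnReconnect[0]);
    }
    for (OnReconnect listener : toNotify) {
      listener.command();
    }
  }
}
{code}
Dropping the copy and iterating inside the synchronized block is simpler, as the comment suggests, but then a slow or blocking listener holds the lock for the whole notification pass and delays every concurrent listener registration.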
[JENKINS] Lucene-Solr-NightlyTests-trunk - Build # 646 - Still Failing
Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-trunk/646/ 4 tests failed. REGRESSION: org.apache.solr.cloud.ChaosMonkeySafeLeaderTest.testDistribSearch Error Message: The Monkey ran for over 20 seconds and no jetties were stopped - this is worth investigating! Stack Trace: java.lang.AssertionError: The Monkey ran for over 20 seconds and no jetties were stopped - this is worth investigating! at __randomizedtesting.SeedInfo.seed([9A887F063BF0EBD8:1B6EF11E4CAF8BE4]:0) at org.junit.Assert.fail(Assert.java:93) at org.apache.solr.cloud.ChaosMonkey.stopTheMonkey(ChaosMonkey.java:535) at org.apache.solr.cloud.ChaosMonkeySafeLeaderTest.doTest(ChaosMonkeySafeLeaderTest.java:140) at org.apache.solr.BaseDistributedSearchTestCase.testDistribSearch(BaseDistributedSearchTestCase.java:869) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836) at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738) at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:43) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:55) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRun
[JENKINS] Lucene-Solr-trunk-Linux (64bit/jdk1.7.0_67) - Build # 11374 - Failure!
Build: http://jenkins.thetaphi.de/job/Lucene-Solr-trunk-Linux/11374/ Java: 64bit/jdk1.7.0_67 -XX:+UseCompressedOops -XX:+UseG1GC 2 tests failed. FAILED: junit.framework.TestSuite.org.apache.solr.cloud.HttpPartitionTest Error Message: 1 thread leaked from SUITE scope at org.apache.solr.cloud.HttpPartitionTest: 1) Thread[id=7808, name=Thread-2805, state=RUNNABLE, group=TGRP-HttpPartitionTest] at java.net.SocketInputStream.socketRead0(Native Method) at java.net.SocketInputStream.read(SocketInputStream.java:152) at java.net.SocketInputStream.read(SocketInputStream.java:122) at org.apache.http.impl.io.AbstractSessionInputBuffer.fillBuffer(AbstractSessionInputBuffer.java:160) at org.apache.http.impl.io.SocketInputBuffer.fillBuffer(SocketInputBuffer.java:84) at org.apache.http.impl.io.AbstractSessionInputBuffer.readLine(AbstractSessionInputBuffer.java:273) at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:140) at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:57) at org.apache.http.impl.io.AbstractMessageParser.parse(AbstractMessageParser.java:260) at org.apache.http.impl.AbstractHttpClientConnection.receiveResponseHeader(AbstractHttpClientConnection.java:283) at org.apache.http.impl.conn.DefaultClientConnection.receiveResponseHeader(DefaultClientConnection.java:251) at org.apache.http.impl.conn.ManagedClientConnectionImpl.receiveResponseHeader(ManagedClientConnectionImpl.java:197) at org.apache.http.protocol.HttpRequestExecutor.doReceiveResponse(HttpRequestExecutor.java:271) at org.apache.http.protocol.HttpRequestExecutor.execute(HttpRequestExecutor.java:123) at org.apache.http.impl.client.DefaultRequestDirector.tryExecute(DefaultRequestDirector.java:682) at org.apache.http.impl.client.DefaultRequestDirector.execute(DefaultRequestDirector.java:486) at org.apache.http.impl.client.AbstractHttpClient.doExecute(AbstractHttpClient.java:863) at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:82) at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:106) at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:57) at org.apache.solr.client.solrj.impl.HttpSolrServer.executeMethod(HttpSolrServer.java:466) at org.apache.solr.client.solrj.impl.HttpSolrServer.request(HttpSolrServer.java:215) at org.apache.solr.client.solrj.impl.HttpSolrServer.request(HttpSolrServer.java:211) at org.apache.solr.cloud.ZkController.waitForLeaderToSeeDownState(ZkController.java:1607) at org.apache.solr.cloud.ZkController.registerAllCoresAsDown(ZkController.java:406) at org.apache.solr.cloud.ZkController.access$000(ZkController.java:93) at org.apache.solr.cloud.ZkController$1.command(ZkController.java:257) at org.apache.solr.common.cloud.ConnectionManager$1$1.run(ConnectionManager.java:166) Stack Trace: com.carrotsearch.randomizedtesting.ThreadLeakError: 1 thread leaked from SUITE scope at org.apache.solr.cloud.HttpPartitionTest: 1) Thread[id=7808, name=Thread-2805, state=RUNNABLE, group=TGRP-HttpPartitionTest] at java.net.SocketInputStream.socketRead0(Native Method) at java.net.SocketInputStream.read(SocketInputStream.java:152) at java.net.SocketInputStream.read(SocketInputStream.java:122) at org.apache.http.impl.io.AbstractSessionInputBuffer.fillBuffer(AbstractSessionInputBuffer.java:160) at org.apache.http.impl.io.SocketInputBuffer.fillBuffer(SocketInputBuffer.java:84) at 
org.apache.http.impl.io.AbstractSessionInputBuffer.readLine(AbstractSessionInputBuffer.java:273) at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:140) at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:57) at org.apache.http.impl.io.AbstractMessageParser.parse(AbstractMessageParser.java:260) at org.apache.http.impl.AbstractHttpClientConnection.receiveResponseHeader(AbstractHttpClientConnection.java:283) at org.apache.http.impl.conn.DefaultClientConnection.receiveResponseHeader(DefaultClientConnection.java:251) at org.apache.http.impl.conn.ManagedClientConnectionImpl.receiveResponseHeader(ManagedClientConnectionImpl.java:197) at org.apache.http.protocol.HttpRequestExecutor.doReceiveResponse(HttpRequestExecutor.java:271) at org.apache.http.protocol.HttpRequestExecutor.execute(HttpRequestExecutor.java:123) at org.apache.http.impl.client.DefaultRequestDirector.tryExecute(DefaultRequestDirector.java:682) at org.apache.http.impl.client.DefaultRequestDirector.execute(DefaultRequestDirector.
[jira] [Updated] (SOLR-6578) Update commons-io dependency to the latest 2.4 version
[ https://issues.apache.org/jira/browse/SOLR-6578?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Shawn Heisey updated SOLR-6578: --- Attachment: SOLR-6578.patch Patch for branch_5x updating commons-io to 2.4. > Update commons-io dependency to the latest 2.4 version > -- > > Key: SOLR-6578 > URL: https://issues.apache.org/jira/browse/SOLR-6578 > Project: Solr > Issue Type: Improvement > Reporter: Karol Abramczyk > Priority: Minor > Attachments: SOLR-6578.patch > > > The latest commons-io version is 2.4, but Solr 4.10 still uses 2.3. The Couchbase plugin for Solr (SOLR-6266) also requires that Solr use commons-io 2.4. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-6530) Commits under network partition can put any node in down state
[ https://issues.apache.org/jira/browse/SOLR-6530?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14157648#comment-14157648 ] ASF subversion and git services commented on SOLR-6530: --- Commit 1629108 from sha...@apache.org in branch 'dev/branches/branch_5x' [ https://svn.apache.org/r1629108 ] SOLR-6530: Commits under network partitions can put any node in down state > Commits under network partition can put any node in down state > -- > > Key: SOLR-6530 > URL: https://issues.apache.org/jira/browse/SOLR-6530 > Project: Solr > Issue Type: Bug > Components: SolrCloud > Reporter: Shalin Shekhar Mangar > Priority: Critical > Fix For: 5.0, Trunk > > Attachments: SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch > > > Commits are executed by any node in SolrCloud, i.e. they're not routed via the leader like other updates. > # Suppose there's 1 collection, 1 shard, 2 replicas (A and B), and A is the leader > # Suppose a commit request is made to node B at a time when B cannot talk to A due to a partition for any reason (failing switch, heavy GC, whatever) > # B fails to distribute the commit to A (times out) and asks A to recover > # This was okay earlier because a leader just ignores recovery requests, but with the leader-initiated recovery code, B puts A in the "down" state and A can never get out of that state. > tl;dr: During network partitions, if enough commit/optimize requests are sent to the cluster, all the nodes in the cluster will eventually be marked as "down". -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
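The scenario in the description above starts with a commit hitting an arbitrary node. From the client side that looks roughly like the SolrJ 4.x sketch below: document updates sent through CloudSolrServer are routed to the shard leader, while a commit can be sent straight to any replica, which then distributes it to the other nodes itself. The ZooKeeper host, replica URL, and collection name are made up for illustration.
{code:java}
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.impl.CloudSolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.common.SolrInputDocument;

public class CommitToAnyNode {
  public static void main(String[] args) throws Exception {
    // Document updates go through CloudSolrServer and are forwarded to the shard leader.
    CloudSolrServer cloud = new CloudSolrServer("localhost:2181"); // hypothetical ZK host
    cloud.setDefaultCollection("collection1");
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", "1");
    cloud.add(doc);

    // A commit, by contrast, can be aimed at any single replica ("node B" in the
    // scenario above); that node then distributes the commit to its peers itself.
    SolrServer replicaB = new HttpSolrServer("http://127.0.0.1:8984/solr/collection1"); // hypothetical replica URL
    replicaB.commit();

    replicaB.shutdown();
    cloud.shutdown();
  }
}
{code}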
[JENKINS] Lucene-Solr-5.x-Linux (32bit/jdk1.7.0_67) - Build # 11221 - Still Failing!
Build: http://jenkins.thetaphi.de/job/Lucene-Solr-5.x-Linux/11221/ Java: 32bit/jdk1.7.0_67 -client -XX:+UseSerialGC 1 tests failed. REGRESSION: org.apache.solr.TestDistributedSearch.testDistribSearch Error Message: Request took too long during query expansion. Terminating request. Stack Trace: org.apache.solr.client.solrj.impl.HttpSolrServer$RemoteSolrException: Request took too long during query expansion. Terminating request. at __randomizedtesting.SeedInfo.seed([377AFD4F005F159A:B69C7357770075A6]:0) at org.apache.solr.client.solrj.impl.HttpSolrServer.executeMethod(HttpSolrServer.java:570) at org.apache.solr.client.solrj.impl.HttpSolrServer.request(HttpSolrServer.java:215) at org.apache.solr.client.solrj.impl.HttpSolrServer.request(HttpSolrServer.java:211) at org.apache.solr.client.solrj.request.QueryRequest.process(QueryRequest.java:91) at org.apache.solr.client.solrj.SolrServer.query(SolrServer.java:301) at org.apache.solr.TestDistributedSearch.queryPartialResults(TestDistributedSearch.java:596) at org.apache.solr.TestDistributedSearch.doTest(TestDistributedSearch.java:499) at org.apache.solr.BaseDistributedSearchTestCase.testDistribSearch(BaseDistributedSearchTestCase.java:875) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836) at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738) at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:43) at org.apache.lucene.util.TestRuleMarkFailure$1.eval
[jira] [Commented] (SOLR-6530) Commits under network partition can put any node in down state
[ https://issues.apache.org/jira/browse/SOLR-6530?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14157646#comment-14157646 ] ASF subversion and git services commented on SOLR-6530: --- Commit 1629107 from sha...@apache.org in branch 'dev/trunk' [ https://svn.apache.org/r1629107 ] SOLR-6530: Reopen the socket proxy after test finishes > Commits under network partition can put any node in down state > -- > > Key: SOLR-6530 > URL: https://issues.apache.org/jira/browse/SOLR-6530 > Project: Solr > Issue Type: Bug > Components: SolrCloud > Reporter: Shalin Shekhar Mangar > Priority: Critical > Fix For: 5.0, Trunk > > Attachments: SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch > > > Commits are executed by any node in SolrCloud, i.e. they're not routed via the leader like other updates. > # Suppose there's 1 collection, 1 shard, 2 replicas (A and B), and A is the leader > # Suppose a commit request is made to node B at a time when B cannot talk to A due to a partition for any reason (failing switch, heavy GC, whatever) > # B fails to distribute the commit to A (times out) and asks A to recover > # This was okay earlier because a leader just ignores recovery requests, but with the leader-initiated recovery code, B puts A in the "down" state and A can never get out of that state. > tl;dr: During network partitions, if enough commit/optimize requests are sent to the cluster, all the nodes in the cluster will eventually be marked as "down". -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-6578) Update commons-io dependency to the latest 2.4 version
[ https://issues.apache.org/jira/browse/SOLR-6578?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14157644#comment-14157644 ] Shawn Heisey commented on SOLR-6578: I've tried to update this in branch_5x, but there seem to be tons of tests failing. I haven't yet been able to determine whether they are failing because of the ivy dependency change or because of the other large-scale changes made to pull applicable trunk changes into 5x. Later I'll be able to try it on the 4_10 branch, but I think all my time for working on this tonight has evaporated. > Update commons-io dependency to the latest 2.4 version > -- > > Key: SOLR-6578 > URL: https://issues.apache.org/jira/browse/SOLR-6578 > Project: Solr > Issue Type: Improvement > Reporter: Karol Abramczyk > Priority: Minor > > The latest commons-io version is 2.4, but Solr 4.10 still uses 2.3. The Couchbase plugin for Solr (SOLR-6266) also requires that Solr use commons-io 2.4. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-5969) Add Lucene50Codec
[ https://issues.apache.org/jira/browse/LUCENE-5969?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14157633#comment-14157633 ] ASF subversion and git services commented on LUCENE-5969: - Commit 1629106 from [~rcmuir] in branch 'dev/branches/lucene5969' [ https://svn.apache.org/r1629106 ] LUCENE-5969: clear nocommit > Add Lucene50Codec > - > > Key: LUCENE-5969 > URL: https://issues.apache.org/jira/browse/LUCENE-5969 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Michael McCandless > Fix For: 5.0, Trunk > > Attachments: LUCENE-5969.patch, LUCENE-5969.patch > > > Spinoff from LUCENE-5952: > * Fix .si to write Version as 3 ints, not a String that requires parsing at > read time. > * Lucene42TermVectorsFormat should not use the same codecName as > Lucene41StoredFieldsFormat > It would also be nice if we had a "bumpCodecVersion" script so rolling a new > codec is not so daunting. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-5969) Add Lucene50Codec
[ https://issues.apache.org/jira/browse/LUCENE-5969?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14157618#comment-14157618 ] ASF subversion and git services commented on LUCENE-5969: - Commit 1629105 from [~rcmuir] in branch 'dev/branches/lucene5969' [ https://svn.apache.org/r1629105 ] LUCENE-5969: don't use indexfilenames in these codecs > Add Lucene50Codec > - > > Key: LUCENE-5969 > URL: https://issues.apache.org/jira/browse/LUCENE-5969 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Michael McCandless > Fix For: 5.0, Trunk > > Attachments: LUCENE-5969.patch, LUCENE-5969.patch > > > Spinoff from LUCENE-5952: > * Fix .si to write Version as 3 ints, not a String that requires parsing at > read time. > * Lucene42TermVectorsFormat should not use the same codecName as > Lucene41StoredFieldsFormat > It would also be nice if we had a "bumpCodecVersion" script so rolling a new > codec is not so daunting. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-Tests-5.x-Java7 - Build # 2150 - Failure
Build: https://builds.apache.org/job/Lucene-Solr-Tests-5.x-Java7/2150/ 2 tests failed. FAILED: junit.framework.TestSuite.org.apache.solr.handler.TestReplicationHandlerBackup Error Message: 1 thread leaked from SUITE scope at org.apache.solr.handler.TestReplicationHandlerBackup: 1) Thread[id=3679, name=Thread-1655, state=RUNNABLE, group=TGRP-TestReplicationHandlerBackup] at java.net.PlainSocketImpl.socketConnect(Native Method) at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:339) at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:200) at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:182) at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392) at java.net.Socket.connect(Socket.java:579) at java.net.Socket.connect(Socket.java:528) at sun.net.NetworkClient.doConnect(NetworkClient.java:180) at sun.net.www.http.HttpClient.openServer(HttpClient.java:432) at sun.net.www.http.HttpClient.openServer(HttpClient.java:527) at sun.net.www.http.HttpClient.parseHTTP(HttpClient.java:652) at sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1323) at java.net.URL.openStream(URL.java:1037) at org.apache.solr.handler.TestReplicationHandlerBackup$BackupThread.run(TestReplicationHandlerBackup.java:318) Stack Trace: com.carrotsearch.randomizedtesting.ThreadLeakError: 1 thread leaked from SUITE scope at org.apache.solr.handler.TestReplicationHandlerBackup: 1) Thread[id=3679, name=Thread-1655, state=RUNNABLE, group=TGRP-TestReplicationHandlerBackup] at java.net.PlainSocketImpl.socketConnect(Native Method) at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:339) at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:200) at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:182) at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392) at java.net.Socket.connect(Socket.java:579) at java.net.Socket.connect(Socket.java:528) at sun.net.NetworkClient.doConnect(NetworkClient.java:180) at sun.net.www.http.HttpClient.openServer(HttpClient.java:432) at sun.net.www.http.HttpClient.openServer(HttpClient.java:527) at sun.net.www.http.HttpClient.parseHTTP(HttpClient.java:652) at sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1323) at java.net.URL.openStream(URL.java:1037) at org.apache.solr.handler.TestReplicationHandlerBackup$BackupThread.run(TestReplicationHandlerBackup.java:318) at __randomizedtesting.SeedInfo.seed([99330B0B38B27B6B]:0) FAILED: junit.framework.TestSuite.org.apache.solr.handler.TestReplicationHandlerBackup Error Message: There are still zombie threads that couldn't be terminated:1) Thread[id=3679, name=Thread-1655, state=RUNNABLE, group=TGRP-TestReplicationHandlerBackup] at java.net.PlainSocketImpl.socketConnect(Native Method) at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:339) at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:200) at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:182) at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392) at java.net.Socket.connect(Socket.java:579) at java.net.Socket.connect(Socket.java:528) at sun.net.NetworkClient.doConnect(NetworkClient.java:180) at sun.net.www.http.HttpClient.openServer(HttpClient.java:432) at sun.net.www.http.HttpClient.openServer(HttpClient.java:527) at sun.net.www.http.HttpClient.parseHTTP(HttpClient.java:652) at 
sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1323) at java.net.URL.openStream(URL.java:1037) at org.apache.solr.handler.TestReplicationHandlerBackup$BackupThread.run(TestReplicationHandlerBackup.java:318) Stack Trace: com.carrotsearch.randomizedtesting.ThreadLeakError: There are still zombie threads that couldn't be terminated: 1) Thread[id=3679, name=Thread-1655, state=RUNNABLE, group=TGRP-TestReplicationHandlerBackup] at java.net.PlainSocketImpl.socketConnect(Native Method) at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:339) at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:200) at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:182) at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392) at java.net.Socket.connect(Socket.java:579) at java.net.Socket.connect(Socket.java:528) at sun.net.NetworkClient.doConnect(NetworkClient.java:180) at sun.net.
[JENKINS] Lucene-Solr-Tests-trunk-Java7 - Build # 4900 - Still Failing
Build: https://builds.apache.org/job/Lucene-Solr-Tests-trunk-Java7/4900/ 1 tests failed. REGRESSION: org.apache.solr.cloud.ChaosMonkeyNothingIsSafeTest.testDistribSearch Error Message: There were too many update fails - we expect it can happen, but shouldn't easily Stack Trace: java.lang.AssertionError: There were too many update fails - we expect it can happen, but shouldn't easily at __randomizedtesting.SeedInfo.seed([A2B11CF286DF423A:235792EAF1802206]:0) at org.junit.Assert.fail(Assert.java:93) at org.junit.Assert.assertTrue(Assert.java:43) at org.junit.Assert.assertFalse(Assert.java:68) at org.apache.solr.cloud.ChaosMonkeyNothingIsSafeTest.doTest(ChaosMonkeyNothingIsSafeTest.java:223) at org.apache.solr.BaseDistributedSearchTestCase.testDistribSearch(BaseDistributedSearchTestCase.java:869) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836) at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738) at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:43) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:55) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl
[JENKINS] Lucene-Solr-5.x-Linux (32bit/jdk1.7.0_67) - Build # 11220 - Failure!
Build: http://jenkins.thetaphi.de/job/Lucene-Solr-5.x-Linux/11220/ Java: 32bit/jdk1.7.0_67 -server -XX:+UseParallelGC 1 tests failed. REGRESSION: org.apache.solr.cloud.DeleteReplicaTest.testDistribSearch Error Message: No live SolrServers available to handle this request:[http://127.0.0.1:54071/a_ab, http://127.0.0.1:59094/a_ab, http://127.0.0.1:44697/a_ab, http://127.0.0.1:33422/a_ab, http://127.0.0.1:37863/a_ab] Stack Trace: org.apache.solr.client.solrj.SolrServerException: No live SolrServers available to handle this request:[http://127.0.0.1:54071/a_ab, http://127.0.0.1:59094/a_ab, http://127.0.0.1:44697/a_ab, http://127.0.0.1:33422/a_ab, http://127.0.0.1:37863/a_ab] at __randomizedtesting.SeedInfo.seed([285AFDBFD77D0739:A9BC73A7A0226705]:0) at org.apache.solr.client.solrj.impl.LBHttpSolrServer.request(LBHttpSolrServer.java:322) at org.apache.solr.client.solrj.impl.CloudSolrServer.sendRequest(CloudSolrServer.java:880) at org.apache.solr.client.solrj.impl.CloudSolrServer.requestWithRetryOnStaleState(CloudSolrServer.java:658) at org.apache.solr.client.solrj.impl.CloudSolrServer.request(CloudSolrServer.java:601) at org.apache.solr.cloud.DeleteReplicaTest.removeAndWaitForReplicaGone(DeleteReplicaTest.java:172) at org.apache.solr.cloud.DeleteReplicaTest.deleteLiveReplicaTest(DeleteReplicaTest.java:145) at org.apache.solr.cloud.DeleteReplicaTest.doTest(DeleteReplicaTest.java:89) at org.apache.solr.BaseDistributedSearchTestCase.testDistribSearch(BaseDistributedSearchTestCase.java:869) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836) at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738) at 
com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter
[jira] [Commented] (SOLR-6426) SolrZkClient clean can fail due to a race with children nodes.
[ https://issues.apache.org/jira/browse/SOLR-6426?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14157445#comment-14157445 ] Jessica Cheng Mallet commented on SOLR-6426: The only thing that worries me about the racing clients is that before, this code would fail (which is what you're trying to fix here), but now there may be a risk of infinite recursion, and hence a stack overflow, if it keeps coming back to this point and finding more children after it thinks it deleted all of them. In practice it probably won't happen, but it just feels a bit scary. Maybe that part could be made iterative instead (with a maximum bail-out number of tries)? > SolrZkClient clean can fail due to a race with children nodes. > -- > > Key: SOLR-6426 > URL: https://issues.apache.org/jira/browse/SOLR-6426 > Project: Solr > Issue Type: Bug > Reporter: Mark Miller > Assignee: Mark Miller > Priority: Minor > Fix For: 5.0, Trunk > > -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
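A bounded, iterative retry along the lines being suggested could look roughly like the sketch below. It is written against the plain ZooKeeper client rather than Solr's SolrZkClient, and the class name, method names, and retry limit are made up for illustration. The retry (the part that could otherwise recurse without bound when racing clients keep adding children) becomes a loop with a bail-out; the tree walk itself stays recursive, since it is bounded by the depth of the znode tree.
{code:java}
import java.util.List;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

public class ZkCleanSketch {
  /** Delete path and everything under it, retrying a bounded number of times. */
  public static void clean(ZooKeeper zk, String path, int maxAttempts)
      throws KeeperException, InterruptedException {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      try {
        deleteSubtree(zk, path);
        return; // subtree fully removed
      } catch (KeeperException.NotEmptyException e) {
        // A racing client added a child between our scan and the delete;
        // loop and re-scan instead of recursing back into clean().
      }
    }
    throw new IllegalStateException("could not clean " + path + " after " + maxAttempts + " attempts");
  }

  private static void deleteSubtree(ZooKeeper zk, String path)
      throws KeeperException, InterruptedException {
    List<String> children;
    try {
      children = zk.getChildren(path, false);
    } catch (KeeperException.NoNodeException e) {
      return; // already gone
    }
    for (String child : children) {
      deleteSubtree(zk, path + "/" + child);
    }
    try {
      zk.delete(path, -1); // -1 matches any znode version
    } catch (KeeperException.NoNodeException e) {
      // deleted concurrently by someone else; fine
    }
  }
}
{code}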
[JENKINS] Lucene-Solr-5.x-MacOSX (64bit/jdk1.8.0) - Build # 1824 - Still Failing!
Build: http://jenkins.thetaphi.de/job/Lucene-Solr-5.x-MacOSX/1824/ Java: 64bit/jdk1.8.0 -XX:-UseCompressedOops -XX:+UseConcMarkSweepGC 1 tests failed. REGRESSION: org.apache.solr.cloud.CollectionsAPIDistributedZkTest.testDistribSearch Error Message: Error CREATEing SolrCore 'halfcollection_shard1_replica1': Unable to create core [halfcollection_shard1_replica1] Caused by: Could not get shard id for core: halfcollection_shard1_replica1 Stack Trace: org.apache.solr.client.solrj.impl.HttpSolrServer$RemoteSolrException: Error CREATEing SolrCore 'halfcollection_shard1_replica1': Unable to create core [halfcollection_shard1_replica1] Caused by: Could not get shard id for core: halfcollection_shard1_replica1 at __randomizedtesting.SeedInfo.seed([550064666E64D470:D4E6EA7E193BB44C]:0) at org.apache.solr.client.solrj.impl.HttpSolrServer.executeMethod(HttpSolrServer.java:570) at org.apache.solr.client.solrj.impl.HttpSolrServer.request(HttpSolrServer.java:215) at org.apache.solr.client.solrj.impl.HttpSolrServer.request(HttpSolrServer.java:211) at org.apache.solr.cloud.CollectionsAPIDistributedZkTest.testErrorHandling(CollectionsAPIDistributedZkTest.java:583) at org.apache.solr.cloud.CollectionsAPIDistributedZkTest.doTest(CollectionsAPIDistributedZkTest.java:205) at org.apache.solr.BaseDistributedSearchTestCase.testDistribSearch(BaseDistributedSearchTestCase.java:869) at sun.reflect.GeneratedMethodAccessor49.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:483) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836) at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738) at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) 
at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequ
[jira] [Commented] (SOLR-6426) SolrZkClient clean can fail due to a race with children nodes.
[ https://issues.apache.org/jira/browse/SOLR-6426?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14157361#comment-14157361 ] Mark Miller commented on SOLR-6426: --- Also, keep in mind, this change does nothing about the risk of another client constantly adding nodes. That could race with this before and after. This is about trying to remove a node that has children - it looks like the node has no children, we try to remove it, boom, it has a child. It doesn't affect a race of adding and removing children nodes. It just makes the method consistently work as intended rather than hitting this odd race failure. That doesn't mean the clean method itself could not be reimplemented or something, but that's another issue. > SolrZkClient clean can fail due to a race with children nodes. > -- > > Key: SOLR-6426 > URL: https://issues.apache.org/jira/browse/SOLR-6426 > Project: Solr > Issue Type: Bug > Reporter: Mark Miller > Assignee: Mark Miller > Priority: Minor > Fix For: 5.0, Trunk > > -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-6426) SolrZkClient clean can fail due to a race with children nodes.
[ https://issues.apache.org/jira/browse/SOLR-6426?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14157354#comment-14157354 ] Mark Miller commented on SOLR-6426: --- This should not normally happen in working Solr code, to say nothing of a race with another client constantly adding nodes. I saw it happen in a test case or something - the reason it happens is unrelated - but as a general client method, I don't think it should randomly bail partway through depending on arbitrary timing. > SolrZkClient clean can fail due to a race with children nodes. > -- > > Key: SOLR-6426 > URL: https://issues.apache.org/jira/browse/SOLR-6426 > Project: Solr > Issue Type: Bug > Reporter: Mark Miller > Assignee: Mark Miller > Priority: Minor > Fix For: 5.0, Trunk > > -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Created] (SOLR-6582) FileSystem#newInstance does not always return a new FileSystem instance as its javadoc says it does.
Mark Miller created SOLR-6582: - Summary: FileSystem#newInstance does not always return a new FileSystem instance as its javadoc says it does. Key: SOLR-6582 URL: https://issues.apache.org/jira/browse/SOLR-6582 Project: Solr Issue Type: Bug Reporter: Mark Miller -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
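The FileSystem here is presumably Hadoop's org.apache.hadoop.fs.FileSystem, which Solr's HDFS support uses. A minimal sketch of what the javadoc leads one to expect is shown below; the NameNode URI is hypothetical, and this only illustrates the contract being discussed, not the Solr code that hit the problem.
{code:java}
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class NewInstanceCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    URI uri = URI.create("hdfs://localhost:8020/"); // hypothetical NameNode address

    // FileSystem.get() is documented to return a shared, cached instance per scheme/authority...
    FileSystem cachedA = FileSystem.get(uri, conf);
    FileSystem cachedB = FileSystem.get(uri, conf);
    System.out.println("get() shares an instance: " + (cachedA == cachedB));

    // ...while newInstance() is documented to return a distinct instance on every call,
    // so these two references should never be identical. The issue reports that this
    // does not always hold.
    FileSystem freshA = FileSystem.newInstance(uri, conf);
    FileSystem freshB = FileSystem.newInstance(uri, conf);
    System.out.println("newInstance() returned distinct instances: " + (freshA != freshB));

    freshA.close();
    freshB.close();
  }
}
{code}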
[JENKINS] Lucene-Solr-trunk-Linux (32bit/jdk1.7.0_67) - Build # 11372 - Failure!
Build: http://jenkins.thetaphi.de/job/Lucene-Solr-trunk-Linux/11372/ Java: 32bit/jdk1.7.0_67 -server -XX:+UseSerialGC 1 tests failed. REGRESSION: org.apache.solr.cloud.LeaderInitiatedRecoveryOnCommitTest.testDistribSearch Error Message: Server refused connection at: http://127.0.0.1:48449/pwww/a Stack Trace: org.apache.solr.client.solrj.SolrServerException: Server refused connection at: http://127.0.0.1:48449/pwww/a at __randomizedtesting.SeedInfo.seed([904FC3CEE5388D3B:11A94DD69267ED07]:0) at org.apache.solr.client.solrj.impl.HttpSolrServer.executeMethod(HttpSolrServer.java:577) at org.apache.solr.client.solrj.impl.HttpSolrServer.request(HttpSolrServer.java:215) at org.apache.solr.client.solrj.impl.HttpSolrServer.request(HttpSolrServer.java:211) at org.apache.solr.cloud.BasicDistributedZkTest.createCollection(BasicDistributedZkTest.java:612) at org.apache.solr.cloud.AbstractFullDistribZkTestBase.createCollection(AbstractFullDistribZkTestBase.java:1579) at org.apache.solr.cloud.LeaderInitiatedRecoveryOnCommitTest.multiShardTest(LeaderInitiatedRecoveryOnCommitTest.java:77) at org.apache.solr.cloud.LeaderInitiatedRecoveryOnCommitTest.doTest(LeaderInitiatedRecoveryOnCommitTest.java:71) at org.apache.solr.BaseDistributedSearchTestCase.testDistribSearch(BaseDistributedSearchTestCase.java:869) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836) at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738) at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783) at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired
[jira] [Commented] (SOLR-6426) SolrZkClient clean can fail due to a race with children nodes.
[ https://issues.apache.org/jira/browse/SOLR-6426?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14157255#comment-14157255 ] Jessica Cheng Mallet commented on SOLR-6426: Hey Mark, just took a look at this patch and there is a risk of stack overflow if children nodes are actively being added. Would you please comment on where you saw the race happen that necessitated this change? Is it better to eliminate that risk instead? > SolrZkClient clean can fail due to a race with children nodes. > -- > > Key: SOLR-6426 > URL: https://issues.apache.org/jira/browse/SOLR-6426 > Project: Solr > Issue Type: Bug >Reporter: Mark Miller >Assignee: Mark Miller >Priority: Minor > Fix For: 5.0, Trunk > > -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-6581) Prepare CollapsingQParserPlugin and ExpandComponent for 5.0
[ https://issues.apache.org/jira/browse/SOLR-6581?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Joel Bernstein updated SOLR-6581: - Description: There were changes made to the CollapsingQParserPlugin and ExpandComponent in the 5x branch that were driven by changes to the Lucene Collectors API and DocValues API. This ticket is to review the 5x implementation and make any changes necessary in preparation for a 5.0 release. was: There we changes made to the CollapsingQParserPlugin and ExpandComponent in the 5x branch that were driven by changes to the Lucene Collectors API and DocValues API. This ticket is to review the 5x implementation and make any changes necessary in preparation for a 5.0 release. > Prepare CollapsingQParserPlugin and ExpandComponent for 5.0 > --- > > Key: SOLR-6581 > URL: https://issues.apache.org/jira/browse/SOLR-6581 > Project: Solr > Issue Type: Bug >Reporter: Joel Bernstein >Priority: Minor > Fix For: 5.0 > > > There were changes made to the CollapsingQParserPlugin and ExpandComponent in > the 5x branch that were driven by changes to the Lucene Collectors API and > DocValues API. This ticket is to review the 5x implementation and make any > changes necessary in preparation for a 5.0 release. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-Tests-trunk-Java7 - Build # 4899 - Failure
Build: https://builds.apache.org/job/Lucene-Solr-Tests-trunk-Java7/4899/ 1 tests failed. REGRESSION: org.apache.solr.cloud.ChaosMonkeySafeLeaderTest.testDistribSearch Error Message: expected:<0> but was:<1> Stack Trace: java.lang.AssertionError: expected:<0> but was:<1> at __randomizedtesting.SeedInfo.seed([DBF689FD724ACD6D:5A1007E50515AD51]:0) at org.junit.Assert.fail(Assert.java:93) at org.junit.Assert.failNotEquals(Assert.java:647) at org.junit.Assert.assertEquals(Assert.java:128) at org.junit.Assert.assertEquals(Assert.java:472) at org.junit.Assert.assertEquals(Assert.java:456) at org.apache.solr.cloud.ChaosMonkeySafeLeaderTest.doTest(ChaosMonkeySafeLeaderTest.java:153) at org.apache.solr.BaseDistributedSearchTestCase.testDistribSearch(BaseDistributedSearchTestCase.java:869) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836) at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738) at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:43) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:55) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$
[jira] [Updated] (SOLR-6513) Add a collectionsAPI call BALANCESLICEUNIQUE
[ https://issues.apache.org/jira/browse/SOLR-6513?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Erick Erickson updated SOLR-6513: - Attachment: SOLR-6513.patch Preliminary patch. I'm chasing down a failing test before I put this on the review board, but if anyone wants an advance peek here it is. > Add a collectionsAPI call BALANCESLICEUNIQUE > > > Key: SOLR-6513 > URL: https://issues.apache.org/jira/browse/SOLR-6513 > Project: Solr > Issue Type: Improvement >Reporter: Erick Erickson >Assignee: Erick Erickson > Attachments: SOLR-6513.patch, SOLR-6513.patch > > > Another sub-task for SOLR-6491. The ability to assign a property on a > node-by-node basis is nice, but tedious to get right for a sysadmin, > especially if there are, say, 100s of nodes hosting a system. This JIRA would > essentially provide an automatic mechanism for assigning a property. This > particular command simply changes the cluster state, it doesn't do anything > like re-assign functions. > My idea for this version is fairly limited. You'd have to specify a > collection and there would be no attempt to, say, evenly distribute the > preferred leader role/property for this collection by looking at _other_ > collections. Or by looking at underlying hardware capabilities. Or > It would be a pretty simple round-robin assignment. About the only > intelligence built in would be to change as few roles/properties as possible. > Let's say that the correct number of nodes for this role turned out to be 3. > Any node currently having 3 properties for this collection would NOT be > changed. Any node having 2 properties would have one added that would be > taken from some node with > 3 properties like this. > This probably needs an optional parameter, something like > "includeInactiveNodes=true|false" > Since this is an arbitrary property, one must specify sliceUnique=true. So > for the "preferredLeader" functionality, one would specify something like: > action=BALANCESLICEUNIQUE&property=preferredLeader&property.value=true. > There are checks in this code that require the preferredLeader to have a t/f > value and require that sliceUnique be true. That said, this can be called on > an arbitrary property that has only one such property per slice. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
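Putting the parameters from the description together, the call would be issued against the Collections API endpoint roughly as follows. Host, port, and collection name are placeholders; the action and property parameters are taken from the description above, and the final parameter names are whatever the committed patch defines.

{noformat}
http://localhost:8983/solr/admin/collections?action=BALANCESLICEUNIQUE&collection=collection1&property=preferredLeader&property.value=true
{noformat}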
[JENKINS] Lucene-Solr-trunk-MacOSX (64bit/jdk1.8.0) - Build # 1863 - Still Failing!
Build: http://jenkins.thetaphi.de/job/Lucene-Solr-trunk-MacOSX/1863/ Java: 64bit/jdk1.8.0 -XX:+UseCompressedOops -XX:+UseG1GC 2 tests failed. REGRESSION: org.apache.solr.cloud.CollectionsAPIDistributedZkTest.testDistribSearch Error Message: Error CREATEing SolrCore 'halfcollection_shard1_replica1': Unable to create core [halfcollection_shard1_replica1] Caused by: Could not get shard id for core: halfcollection_shard1_replica1 Stack Trace: org.apache.solr.client.solrj.impl.HttpSolrServer$RemoteSolrException: Error CREATEing SolrCore 'halfcollection_shard1_replica1': Unable to create core [halfcollection_shard1_replica1] Caused by: Could not get shard id for core: halfcollection_shard1_replica1 at __randomizedtesting.SeedInfo.seed([E7F940409E88B49B:661FCE58E9D7D4A7]:0) at org.apache.solr.client.solrj.impl.HttpSolrServer.executeMethod(HttpSolrServer.java:570) at org.apache.solr.client.solrj.impl.HttpSolrServer.request(HttpSolrServer.java:215) at org.apache.solr.client.solrj.impl.HttpSolrServer.request(HttpSolrServer.java:211) at org.apache.solr.cloud.CollectionsAPIDistributedZkTest.testErrorHandling(CollectionsAPIDistributedZkTest.java:583) at org.apache.solr.cloud.CollectionsAPIDistributedZkTest.doTest(CollectionsAPIDistributedZkTest.java:205) at org.apache.solr.BaseDistributedSearchTestCase.testDistribSearch(BaseDistributedSearchTestCase.java:869) at sun.reflect.GeneratedMethodAccessor40.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:483) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836) at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738) at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java
[jira] [Commented] (SOLR-6351) Let Stats Hang off of Pivots (via 'tag')
[ https://issues.apache.org/jira/browse/SOLR-6351?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14157129#comment-14157129 ] Steve Molloy commented on SOLR-6351: [~vzhovtiuk] Does your patch contain changes from mine? There were some NPEs, as Hoss mentioned, which I think I got fixed. I like the addition of tests though, so hope to get the best of both. I don't mind providing a patch combining both patches, just want to avoid us posting at about the same time again. :) > Let Stats Hang off of Pivots (via 'tag') > > > Key: SOLR-6351 > URL: https://issues.apache.org/jira/browse/SOLR-6351 > Project: Solr > Issue Type: Sub-task >Reporter: Hoss Man > Attachments: SOLR-6351.patch, SOLR-6351.patch, SOLR-6351.patch > > > The goal here is basically to flip the notion of "stats.facet" on its head, so > that instead of asking the stats component to also do some faceting > (something that's never worked well with the variety of field types and has > never worked in distributed mode) we instead ask the PivotFacet code to > compute some stats X for each leaf in a pivot. We'll do this with the > existing {{stats.field}} params, but we'll leverage the {{tag}} local param > of the {{stats.field}} instances to be able to associate which stats we want > hanging off of which {{facet.pivot}} > Example... > {noformat} > facet.pivot={!stats=s1}category,manufacturer > stats.field={!key=avg_price tag=s1 mean=true}price > stats.field={!tag=s1 min=true max=true}user_rating > {noformat} > ...with the request above, in addition to computing the min/max user_rating > and mean price (labeled "avg_price") over the entire result set, the > PivotFacet component will also include those stats for every node of the tree > it builds up when generating a pivot of the fields "category,manufacturer" -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
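For anyone trying out the patches in this thread from SolrJ, the request in the issue description can be assembled as below. The base URL and field names are placeholders, and whether the tagged stats actually appear on each pivot node depends on having one of the SOLR-6351 patches applied.

{code}
// Sketch of issuing the request from SolrJ, mirroring the example in the issue
// description; server URL and field names are placeholders.
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;

public class PivotStatsQuery {
  public static void main(String[] args) throws Exception {
    HttpSolrServer server = new HttpSolrServer("http://localhost:8983/solr/collection1");
    SolrQuery q = new SolrQuery("*:*");
    q.setFacet(true);
    q.add("facet.pivot", "{!stats=s1}category,manufacturer");
    q.set("stats", true);
    q.add("stats.field", "{!key=avg_price tag=s1 mean=true}price");
    q.add("stats.field", "{!tag=s1 min=true max=true}user_rating");
    QueryResponse rsp = server.query(q);
    // getFacetPivot() returns the pivot tree; with the patch applied, each pivot
    // node is expected to also carry the stats tagged with s1.
    System.out.println(rsp.getFacetPivot());
    server.shutdown();
  }
}
{code}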
[jira] [Commented] (SOLR-6564) Fix failing ExitableDirectoryReader tests for Solr
[ https://issues.apache.org/jira/browse/SOLR-6564?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14157130#comment-14157130 ] Anshum Gupta commented on SOLR-6564: We shouldn't see any more (Cloud)ExitableDirectoryReaderTest failures due to timing-related issues. I'll leave this open for a day or 2 before closing it. > Fix failing ExitableDirectoryReader tests for Solr > -- > > Key: SOLR-6564 > URL: https://issues.apache.org/jira/browse/SOLR-6564 > Project: Solr > Issue Type: Test > Components: Tests >Reporter: Anshum Gupta >Assignee: Anshum Gupta > > ExitableDirectoryReader tests are failing as they enumerate over the terms in > less than 1ms (min timeAllowed value that can be set). Need to fix this. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-6564) Fix failing ExitableDirectoryReader tests for Solr
[ https://issues.apache.org/jira/browse/SOLR-6564?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14157128#comment-14157128 ] ASF subversion and git services commented on SOLR-6564: --- Commit 1629063 from [~anshumg] in branch 'dev/branches/branch_5x' [ https://svn.apache.org/r1629063 ] SOLR-6564: Fix failing CloudExitableDirectoryReader test in Solr (merge from trunk r1629062) > Fix failing ExitableDirectoryReader tests for Solr > -- > > Key: SOLR-6564 > URL: https://issues.apache.org/jira/browse/SOLR-6564 > Project: Solr > Issue Type: Test > Components: Tests >Reporter: Anshum Gupta >Assignee: Anshum Gupta > > ExitableDirectoryReader tests are failing as they enumerate over the terms in > less than 1ms (min timeAllowed value that can be set). Need to fix this. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-6564) Fix failing ExitableDirectoryReader tests for Solr
[ https://issues.apache.org/jira/browse/SOLR-6564?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14157126#comment-14157126 ] ASF subversion and git services commented on SOLR-6564: --- Commit 1629062 from [~anshumg] in branch 'dev/trunk' [ https://svn.apache.org/r1629062 ] SOLR-6564: Fix failing CloudExitableDirectoryReader test in Solr > Fix failing ExitableDirectoryReader tests for Solr > -- > > Key: SOLR-6564 > URL: https://issues.apache.org/jira/browse/SOLR-6564 > Project: Solr > Issue Type: Test > Components: Tests >Reporter: Anshum Gupta >Assignee: Anshum Gupta > > ExitableDirectoryReader tests are failing as they enumerate over the terms in > less than 1ms (min timeAllowed value that can be set). Need to fix this. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Resolved] (SOLR-6577) The ability to add or change arbitrary replica properties must not allow the system properties to be changed
[ https://issues.apache.org/jira/browse/SOLR-6577?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Erick Erickson resolved SOLR-6577. -- Resolution: Fixed Fix Version/s: Trunk 5.0 > The ability to add or change arbitrary replica properties must not allow the > system properties to be changed > > > Key: SOLR-6577 > URL: https://issues.apache.org/jira/browse/SOLR-6577 > Project: Solr > Issue Type: Bug >Reporter: Erick Erickson >Assignee: Erick Erickson > Fix For: 5.0, Trunk > > Attachments: SOLR-6577.patch > > > Just realized a...significant problem with the "arbitrary property" bit > (SOLR-6512). The way I wrote it I can delete _any_ property at all, things > like > core > node_name > etc. > And when you _do_ delete some of these, interesting things happen, like the > cluster becomes unusable. Oops. > I think the right thing to do here is to automatically add a "property" > prefix to all of the arbitrary properties that a user tries to add or delete. > I'll have a patch up tomorrow for this I hope. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-6577) The ability to add or change arbitrary replica properties must not allow the system properties to be changed
[ https://issues.apache.org/jira/browse/SOLR-6577?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14157036#comment-14157036 ] ASF subversion and git services commented on SOLR-6577: --- Commit 1629053 from [~erickoerickson] in branch 'dev/branches/branch_5x' [ https://svn.apache.org/r1629053 ] SOLR-6577: The ability to add or change arbitrary replica properties must not allow the system properties to be changed > The ability to add or change arbitrary replica properties must not allow the > system properties to be changed > > > Key: SOLR-6577 > URL: https://issues.apache.org/jira/browse/SOLR-6577 > Project: Solr > Issue Type: Bug >Reporter: Erick Erickson >Assignee: Erick Erickson > Attachments: SOLR-6577.patch > > > Just realized a...significant problem with the "arbitrary property" bit > (SOLR-6512). The way I wrote it I can delete _any_ property at all, things > like > core > node_name > etc. > And when you _do_ delete some of these, interesting things happen, like the > cluster becomes unusable. Oops. > I think the right thing to do here is to automatically add a "property" > prefix to all of the arbitrary properties that a user tries to add or delete. > I'll have a patch up tomorrow for this I hope. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-6351) Let Stats Hang off of Pivots (via 'tag')
[ https://issues.apache.org/jira/browse/SOLR-6351?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Vitaliy Zhovtyuk updated SOLR-6351: --- Attachment: SOLR-6351.patch Combined with previous patch. 1. Added more solrj tests for stats on pivots 2. Fixed stats result 3. Minor tweaks > Let Stats Hang off of Pivots (via 'tag') > > > Key: SOLR-6351 > URL: https://issues.apache.org/jira/browse/SOLR-6351 > Project: Solr > Issue Type: Sub-task >Reporter: Hoss Man > Attachments: SOLR-6351.patch, SOLR-6351.patch, SOLR-6351.patch > > > The goal here is basically to flip the notion of "stats.facet" on its head, so > that instead of asking the stats component to also do some faceting > (something that's never worked well with the variety of field types and has > never worked in distributed mode) we instead ask the PivotFacet code to > compute some stats X for each leaf in a pivot. We'll do this with the > existing {{stats.field}} params, but we'll leverage the {{tag}} local param > of the {{stats.field}} instances to be able to associate which stats we want > hanging off of which {{facet.pivot}} > Example... > {noformat} > facet.pivot={!stats=s1}category,manufacturer > stats.field={!key=avg_price tag=s1 mean=true}price > stats.field={!tag=s1 min=true max=true}user_rating > {noformat} > ...with the request above, in addition to computing the min/max user_rating > and mean price (labeled "avg_price") over the entire result set, the > PivotFacet component will also include those stats for every node of the tree > it builds up when generating a pivot of the fields "category,manufacturer" -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-6577) The ability to add or change arbitrary replica properties must not allow the system properties to be changed
[ https://issues.apache.org/jira/browse/SOLR-6577?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Erick Erickson updated SOLR-6577: - Attachment: SOLR-6577.patch Patch. I beefed up the tests and am committing this one. > The ability to add or change arbitrary replica properties must not allow the > system properties to be changed > > > Key: SOLR-6577 > URL: https://issues.apache.org/jira/browse/SOLR-6577 > Project: Solr > Issue Type: Bug >Reporter: Erick Erickson >Assignee: Erick Erickson > Attachments: SOLR-6577.patch > > > Just realized a...significant problem with the "arbitrary property" bit > (SOLR-6512). The way I wrote it I can delete _any_ property at all, things > like > core > node_name > etc. > And when you _do_ delete some of these, interesting things happen, like the > cluster becomes unusable. Oops. > I think the right thing to do here is to automatically add a "property" > prefix to all of the arbitrary properties that a user tries to add or delete. > I'll have a patch up tomorrow for this I hope. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-6577) The ability to add or change arbitrary replica properties must not allow the system properties to be changed
[ https://issues.apache.org/jira/browse/SOLR-6577?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156911#comment-14156911 ] ASF subversion and git services commented on SOLR-6577: --- Commit 1629033 from [~erickoerickson] in branch 'dev/trunk' [ https://svn.apache.org/r1629033 ] SOLR-6577: The ability to add or change arbitrary replica properties must not allow the system properties to be changed > The ability to add or change arbitrary replica properties must not allow the > system properties to be changed > > > Key: SOLR-6577 > URL: https://issues.apache.org/jira/browse/SOLR-6577 > Project: Solr > Issue Type: Bug >Reporter: Erick Erickson >Assignee: Erick Erickson > > Just realized a...significant problem with the "arbitrary property" bit > (SOLR-6512). The way I wrote it I can delete _any_ property at all, things > like > core > node_name > etc. > And when you _do_ delete some of these, interesting things happen, like the > cluster becomes unusable. Oops. > I think the right thing to do here is to automatically add a "property" > prefix to all of the arbitrary properties that a user tries to add or delete. > I'll have a patch up tomorrow for this I hope. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-6351) Let Stats Hang off of Pivots (via 'tag')
[ https://issues.apache.org/jira/browse/SOLR-6351?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Steve Molloy updated SOLR-6351: --- Attachment: SOLR-6351.patch Adapted patch a bit to avoid an NPE when no stats are requested, modified PivotFacetValue to propagate info and other small tweaks. Tests seem to be happy and got some requests to work, so I am too... :) > Let Stats Hang off of Pivots (via 'tag') > > > Key: SOLR-6351 > URL: https://issues.apache.org/jira/browse/SOLR-6351 > Project: Solr > Issue Type: Sub-task >Reporter: Hoss Man > Attachments: SOLR-6351.patch, SOLR-6351.patch > > > The goal here is basically to flip the notion of "stats.facet" on its head, so > that instead of asking the stats component to also do some faceting > (something that's never worked well with the variety of field types and has > never worked in distributed mode) we instead ask the PivotFacet code to > compute some stats X for each leaf in a pivot. We'll do this with the > existing {{stats.field}} params, but we'll leverage the {{tag}} local param > of the {{stats.field}} instances to be able to associate which stats we want > hanging off of which {{facet.pivot}} > Example... > {noformat} > facet.pivot={!stats=s1}category,manufacturer > stats.field={!key=avg_price tag=s1 mean=true}price > stats.field={!tag=s1 min=true max=true}user_rating > {noformat} > ...with the request above, in addition to computing the min/max user_rating > and mean price (labeled "avg_price") over the entire result set, the > PivotFacet component will also include those stats for every node of the tree > it builds up when generating a pivot of the fields "category,manufacturer" -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-6351) Let Stats Hang off of Pivots (via 'tag')
[ https://issues.apache.org/jira/browse/SOLR-6351?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156877#comment-14156877 ] Hoss Man commented on SOLR-6351: Vitaliy: I haven't had a chance to look in depth at your patch, but when I tried to run the tests all of the pivot code seemed to fail with an NPE? {noformat} [junit4] ERROR 30.8s J2 | DistributedFacetPivotSmallTest.testDistribSearch <<< [junit4]> Throwable #1: org.apache.solr.client.solrj.impl.HttpSolrServer$RemoteSolrException: java.lang.NullPointerException [junit4]>at org.apache.solr.handler.component.PivotFacetProcessor.getStatsFields(PivotFacetProcessor.java:158) [junit4]>at org.apache.solr.handler.component.PivotFacetProcessor.processSingle(PivotFacetProcessor.java:121) [junit4]>at org.apache.solr.handler.component.PivotFacetProcessor.process(PivotFacetProcessor.java:97) [junit4]>at org.apache.solr.handler.component.FacetComponent.process(FacetComponent.java:112) [junit4]>at org.apache.solr.handler.component.SearchHandler.handleRequestBody(SearchHandler.java:226) ... [junit4] ERROR 0.07s | FacetPivotSmallTest.testPivotFacetIndexSortMincountLimitAndOffsetPermutations <<< [junit4]> Throwable #1: java.lang.RuntimeException: Exception during query [junit4]>at __randomizedtesting.SeedInfo.seed([79673644714434B5:6E2351EE52D41611]:0) [junit4]>at org.apache.solr.SolrTestCaseJ4.assertQ(SolrTestCaseJ4.java:723) [junit4]>at org.apache.solr.SolrTestCaseJ4.assertQ(SolrTestCaseJ4.java:690) [junit4]>at org.apache.solr.handler.component.FacetPivotSmallTest.testPivotFacetIndexSortMincountLimitAndOffsetPermutations(FacetPivotSmallTest.java:425) [junit4]>at java.lang.Thread.run(Thread.java:745) [junit4]> Caused by: java.lang.NullPointerException [junit4]>at org.apache.solr.handler.component.PivotFacetProcessor.getStatsFields(PivotFacetProcessor.java:158) [junit4]>at org.apache.solr.handler.component.PivotFacetProcessor.processSingle(PivotFacetProcessor.java:121) ... {noformat} > Let Stats Hang off of Pivots (via 'tag') > > > Key: SOLR-6351 > URL: https://issues.apache.org/jira/browse/SOLR-6351 > Project: Solr > Issue Type: Sub-task >Reporter: Hoss Man > Attachments: SOLR-6351.patch > > > The goal here is basically to flip the notion of "stats.facet" on its head, so > that instead of asking the stats component to also do some faceting > (something that's never worked well with the variety of field types and has > never worked in distributed mode) we instead ask the PivotFacet code to > compute some stats X for each leaf in a pivot. We'll do this with the > existing {{stats.field}} params, but we'll leverage the {{tag}} local param > of the {{stats.field}} instances to be able to associate which stats we want > hanging off of which {{facet.pivot}} > Example... > {noformat} > facet.pivot={!stats=s1}category,manufacturer > stats.field={!key=avg_price tag=s1 mean=true}price > stats.field={!tag=s1 min=true max=true}user_rating > {noformat} > ...with the request above, in addition to computing the min/max user_rating > and mean price (labeled "avg_price") over the entire result set, the > PivotFacet component will also include those stats for every node of the tree > it builds up when generating a pivot of the fields "category,manufacturer" -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-6581) Prepare CollapsingQParserPlugin and ExpandComponent for 5.0
[ https://issues.apache.org/jira/browse/SOLR-6581?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Joel Bernstein updated SOLR-6581: - Fix Version/s: 5.0 > Prepare CollapsingQParserPlugin and ExpandComponent for 5.0 > --- > > Key: SOLR-6581 > URL: https://issues.apache.org/jira/browse/SOLR-6581 > Project: Solr > Issue Type: Bug >Reporter: Joel Bernstein >Priority: Minor > Fix For: 5.0 > > > There were changes made to the CollapsingQParserPlugin and ExpandComponent in > the 5x branch that were driven by changes to the Lucene Collectors API and > DocValues API. This ticket is to review the 5x implementation and make any > changes necessary in preparation for a 5.0 release. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Created] (SOLR-6581) Prepare CollapsingQParserPlugin and ExpandComponent for 5.0
Joel Bernstein created SOLR-6581: Summary: Prepare CollapsingQParserPlugin and ExpandComponent for 5.0 Key: SOLR-6581 URL: https://issues.apache.org/jira/browse/SOLR-6581 Project: Solr Issue Type: Bug Reporter: Joel Bernstein Priority: Minor There were changes made to the CollapsingQParserPlugin and ExpandComponent in the 5x branch that were driven by changes to the Lucene Collectors API and DocValues API. This ticket is to review the 5x implementation and make any changes necessary in preparation for a 5.0 release. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-6510) select?collapse=... - fix NPE in Collapsing(FieldValue|Score)Collector
[ https://issues.apache.org/jira/browse/SOLR-6510?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156822#comment-14156822 ] Joel Bernstein commented on SOLR-6510: -- If there is going to be a 4.10.2, then we could fix this bug for that release though. > select?collapse=... - fix NPE in Collapsing(FieldValue|Score)Collector > -- > > Key: SOLR-6510 > URL: https://issues.apache.org/jira/browse/SOLR-6510 > Project: Solr > Issue Type: Bug >Affects Versions: 4.8.1 >Reporter: Christine Poerschke >Assignee: Joel Bernstein >Priority: Minor > > Affects branch_4x but not trunk, collapse field must be docValues=true and > shard empty (or with nothing indexed for the field?). -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-6510) select?collapse=... - fix NPE in Collapsing(FieldValue|Score)Collector
[ https://issues.apache.org/jira/browse/SOLR-6510?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156815#comment-14156815 ] Joel Bernstein commented on SOLR-6510: -- If I've been following things correctly, the next release will be cut from branch_5x. The CollapsingQParserPlugin has a different implementation for 5.0 that uses the DocValues class, which I believe does not have this issue. More testing needs to be done with the 5.0 implementation to understand the full effect of this change. The change was made as part of a large patch when the new DocValues class was introduced to Lucene. I'll open another ticket as a review for the CollapsingQParserPlugin 5.0. We can probably close this ticket though. > select?collapse=... - fix NPE in Collapsing(FieldValue|Score)Collector > -- > > Key: SOLR-6510 > URL: https://issues.apache.org/jira/browse/SOLR-6510 > Project: Solr > Issue Type: Bug >Affects Versions: 4.8.1 >Reporter: Christine Poerschke >Assignee: Joel Bernstein >Priority: Minor > > Affects branch_4x but not trunk, collapse field must be docValues=true and > shard empty (or with nothing indexed for the field?). -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-6510) select?collapse=... - fix NPE in Collapsing(FieldValue|Score)Collector
[ https://issues.apache.org/jira/browse/SOLR-6510?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156773#comment-14156773 ] David Smiley commented on SOLR-6510: I'd like to see this get fixed for the next Solr release. Joel; if you're not going to be able to do it, I'll step in. Just let me know. > select?collapse=... - fix NPE in Collapsing(FieldValue|Score)Collector > -- > > Key: SOLR-6510 > URL: https://issues.apache.org/jira/browse/SOLR-6510 > Project: Solr > Issue Type: Bug >Affects Versions: 4.8.1 >Reporter: Christine Poerschke >Assignee: Joel Bernstein >Priority: Minor > > Affects branch_4x but not trunk, collapse field must be docValues=true and > shard empty (or with nothing indexed for the field?). -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
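The failure mode being discussed (collapse field with docValues=true on an empty shard) comes down to a missing null check when a segment has no values for the field. The sketch below is not the actual Collapsing(FieldValue|Score)Collector code, only an illustration of the guard, written against the branch_4x AtomicReader API.

{code}
// Not the actual collector code: a sketch of the null guard that the report
// above implies is missing on branch_4x when the collapse field is
// docValues=true and the shard (or segment) has nothing indexed for it.
import java.io.IOException;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.SortedDocValues;

public class CollapseNullGuardSketch {
  private SortedDocValues values;

  public void setNextReader(AtomicReaderContext context, String collapseField) throws IOException {
    // getSortedDocValues returns null when the segment has no values for the
    // field, e.g. an empty shard; remember that instead of assuming non-null.
    values = context.reader().getSortedDocValues(collapseField);
  }

  public void collect(int doc) {
    if (values == null) {
      return; // nothing to collapse on in this segment
    }
    int ord = values.getOrd(doc);
    // ... group the document by ord as the real collector does ...
  }
}
{code}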
ICUFoldingFilter mapping modifications... possible?
The mapping in the ICUFoldingFilter has been pretty static, as far as I know, and I'm wondering whether there's any willingness to apply adjustments. At the Cornell Library, we're finding ourselves having to make separate PatternReplace mapping rules because specific characters that should be handled by the filter's punctuation folding are instead handled by its diacritic removal. The result is extremely troublesome searching behavior for Romanized Arabic text, at least, and probably creates issues with other languages. I have conferred with a colleague at the Columbia University Library, who has had to make similar adjustments to the mapping. I would ideally like to see these changes made at the source, which would simplify my configuration, and improve searching for everyone depending on this filter to take care of the messy international characters out of the box. Would such an update be likely to be accepted, or is the filter wed to its current configuration? Thanks, Frances Frances Webb Developer Cornell University Library
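For context, the kind of per-installation workaround Frances describes usually takes the form of a char filter that rewrites the problem characters before ICU folding sees them. The snippet below is purely illustrative: the field type name, the choice of characters (the modifier letters U+02BB/U+02BC that show up in Arabic romanization), and the empty replacement are assumptions, not a recommended mapping.

{noformat}
<fieldType name="text_folded" class="solr.TextField" positionIncrementGap="100">
  <analyzer>
    <!-- strip selected modifier letters before ICU folding can treat them as diacritics -->
    <charFilter class="solr.PatternReplaceCharFilterFactory"
                pattern="[\u02BB\u02BC]" replacement=""/>
    <tokenizer class="solr.ICUTokenizerFactory"/>
    <filter class="solr.ICUFoldingFilterFactory"/>
  </analyzer>
</fieldType>
{noformat}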
[jira] [Assigned] (SOLR-1632) Distributed IDF
[ https://issues.apache.org/jira/browse/SOLR-1632?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Anshum Gupta reassigned SOLR-1632: -- Assignee: Anshum Gupta (was: Mark Miller) > Distributed IDF > --- > > Key: SOLR-1632 > URL: https://issues.apache.org/jira/browse/SOLR-1632 > Project: Solr > Issue Type: New Feature > Components: search >Affects Versions: 1.5 >Reporter: Andrzej Bialecki >Assignee: Anshum Gupta > Fix For: 5.0, Trunk > > Attachments: 3x_SOLR-1632_doesntwork.patch, SOLR-1632.patch, > SOLR-1632.patch, SOLR-1632.patch, SOLR-1632.patch, SOLR-1632.patch, > SOLR-1632.patch, SOLR-1632.patch, SOLR-1632.patch, SOLR-1632.patch, > SOLR-1632.patch, SOLR-1632.patch, SOLR-1632.patch, SOLR-1632.patch, > distrib-2.patch, distrib.patch > > > Distributed IDF is a valuable enhancement for distributed search across > non-uniform shards. This issue tracks the proposed implementation of an API > to support this functionality in Solr. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-1632) Distributed IDF
[ https://issues.apache.org/jira/browse/SOLR-1632?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Anshum Gupta updated SOLR-1632: --- Fix Version/s: (was: 4.9) 5.0 > Distributed IDF > --- > > Key: SOLR-1632 > URL: https://issues.apache.org/jira/browse/SOLR-1632 > Project: Solr > Issue Type: New Feature > Components: search >Affects Versions: 1.5 >Reporter: Andrzej Bialecki >Assignee: Anshum Gupta > Fix For: 5.0, Trunk > > Attachments: 3x_SOLR-1632_doesntwork.patch, SOLR-1632.patch, > SOLR-1632.patch, SOLR-1632.patch, SOLR-1632.patch, SOLR-1632.patch, > SOLR-1632.patch, SOLR-1632.patch, SOLR-1632.patch, SOLR-1632.patch, > SOLR-1632.patch, SOLR-1632.patch, SOLR-1632.patch, SOLR-1632.patch, > distrib-2.patch, distrib.patch > > > Distributed IDF is a valuable enhancement for distributed search across > non-uniform shards. This issue tracks the proposed implementation of an API > to support this functionality in Solr. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-6249) Schema API changes return success before all cores are updated
[ https://issues.apache.org/jira/browse/SOLR-6249?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Timothy Potter updated SOLR-6249: - Attachment: SOLR-6249_reconnect.patch Here's an updated patch for the reconnect issue with some unit testing added to verify the watcher fires correctly after a zk session expiration. I think this is good to go. > Schema API changes return success before all cores are updated > -- > > Key: SOLR-6249 > URL: https://issues.apache.org/jira/browse/SOLR-6249 > Project: Solr > Issue Type: Improvement > Components: Schema and Analysis, SolrCloud >Reporter: Gregory Chanan >Assignee: Timothy Potter > Attachments: SOLR-6249.patch, SOLR-6249.patch, SOLR-6249.patch, > SOLR-6249_reconnect.patch, SOLR-6249_reconnect.patch > > > See SOLR-6137 for more details. > The basic issue is that Schema API changes return success when the first core > is updated, but other cores asynchronously read the updated schema from > ZooKeeper. > So a client application could make a Schema API change and then index some > documents based on the new schema that may fail on other nodes. > Possible fixes: > 1) Make the Schema API calls synchronous > 2) Give the client some ability to track the state of the schema. They can > already do this to a certain extent by checking the Schema API on all the > replicas and verifying that the field has been added, though this is pretty > cumbersome. Maybe it makes more sense to do this sort of thing on the > collection level, i.e. Schema API changes return the zk version to the > client. We add an API to return the current zk version. On a replica, if > the zk version is >= the version the client has, the client knows that > replica has at least seen the schema change. We could also provide an API to > do the distribution and checking across the different replicas of the > collection so that clients don't need to do that themselves. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
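Option 2 in the description implies a client-side wait loop along these lines. The "/schema/zkversion" path and the "zkversion" response key used here are invented names for illustration; the actual endpoint and response shape are whatever the patches on this issue define.

{code}
// Hypothetical client-side check built on the proposal above: poll each replica
// for the schema znode version until every one has caught up to the version the
// Schema API call returned.
import java.util.List;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;

public class SchemaVersionWait {
  public static void waitForSchema(List<String> replicaBaseUrls, int expectedZkVersion)
      throws Exception {
    for (String baseUrl : replicaBaseUrls) {
      HttpSolrServer server = new HttpSolrServer(baseUrl);
      try {
        while (true) {
          QueryRequest req = new QueryRequest(new ModifiableSolrParams());
          req.setPath("/schema/zkversion"); // hypothetical endpoint name
          NamedList<Object> rsp = server.request(req);
          Number version = (Number) rsp.get("zkversion"); // hypothetical key
          if (version != null && version.intValue() >= expectedZkVersion) {
            break; // this replica has seen the schema change
          }
          Thread.sleep(250);
        }
      } finally {
        server.shutdown();
      }
    }
  }
}
{code}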
[jira] [Resolved] (SOLR-6444) HttpPartitionTest uses real-time get to verify docs exist in replicas which gets routed to an active replica so is not actually verifying the replica recovered correctly
[ https://issues.apache.org/jira/browse/SOLR-6444?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Timothy Potter resolved SOLR-6444. -- Resolution: Fixed Fix Version/s: 5.0 > HttpPartitionTest uses real-time get to verify docs exist in replicas which > gets routed to an active replica so is not actually verifying the replica > recovered correctly > - > > Key: SOLR-6444 > URL: https://issues.apache.org/jira/browse/SOLR-6444 > Project: Solr > Issue Type: Bug > Components: SolrCloud >Reporter: Timothy Potter >Assignee: Timothy Potter > Fix For: 5.0 > > > Need to fix the assertDocExists method to use distrib=false -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-6395) If the overseer queue is large, then the cloud tree view (admin UI) hangs
[ https://issues.apache.org/jira/browse/SOLR-6395?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156701#comment-14156701 ] Timothy Potter commented on SOLR-6395: -- Yes, I think that would be the best approach - to just request znodes per level and then dynamically pull in more znodes as the user interacts with the tree. > If the overseer queue is large, then the cloud tree view (admin UI) hangs > - > > Key: SOLR-6395 > URL: https://issues.apache.org/jira/browse/SOLR-6395 > Project: Solr > Issue Type: Bug > Components: SolrCloud, web gui >Reporter: Timothy Potter >Assignee: Timothy Potter > > Of course, an overseer queue that is backed up is a symptom of bigger issues, > but if it is, the tree view in the cloud panel becomes almost unusable, > presumably because the UI is trying to pull all the overseer queue child > nodes? It would be better to lazily load child nodes when the parent znode tree > element is opened. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
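The per-level loading suggested above is cheap on the ZooKeeper side: one getChildren call per expanded node, instead of walking the whole subtree up front. A sketch with the plain ZooKeeper client follows; it is not the admin UI's actual code.

{code}
// Sketch of lazy, per-level loading of the znode tree: fetch only the immediate
// children of the node the user just expanded.
import java.util.Collections;
import java.util.List;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

public class LazyZkTree {
  private final ZooKeeper zk;

  public LazyZkTree(ZooKeeper zk) {
    this.zk = zk;
  }

  /** Called when the user expands a tree node; one round-trip, one level. */
  public List<String> childrenOf(String path) throws InterruptedException, KeeperException {
    try {
      return zk.getChildren(path, false);
    } catch (KeeperException.NoNodeException e) {
      return Collections.emptyList(); // node vanished between render and expand
    }
  }
}
{code}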
[jira] [Commented] (LUCENE-5969) Add Lucene50Codec
[ https://issues.apache.org/jira/browse/LUCENE-5969?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156697#comment-14156697 ] ASF subversion and git services commented on LUCENE-5969: - Commit 1629008 from [~rcmuir] in branch 'dev/branches/lucene5969' [ https://svn.apache.org/r1629008 ] LUCENE-5969: simplify cfs for 5.0 > Add Lucene50Codec > - > > Key: LUCENE-5969 > URL: https://issues.apache.org/jira/browse/LUCENE-5969 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Michael McCandless > Fix For: 5.0, Trunk > > Attachments: LUCENE-5969.patch, LUCENE-5969.patch > > > Spinoff from LUCENE-5952: > * Fix .si to write Version as 3 ints, not a String that requires parsing at > read time. > * Lucene42TermVectorsFormat should not use the same codecName as > Lucene41StoredFieldsFormat > It would also be nice if we had a "bumpCodecVersion" script so rolling a new > codec is not so daunting. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Resolved] (SOLR-6511) Fencepost error in LeaderInitiatedRecoveryThread
[ https://issues.apache.org/jira/browse/SOLR-6511?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Timothy Potter resolved SOLR-6511. -- Resolution: Fixed Fix Version/s: 5.0 > Fencepost error in LeaderInitiatedRecoveryThread > > > Key: SOLR-6511 > URL: https://issues.apache.org/jira/browse/SOLR-6511 > Project: Solr > Issue Type: Bug >Reporter: Alan Woodward >Assignee: Timothy Potter > Fix For: 5.0 > > Attachments: SOLR-6511.patch, SOLR-6511.patch > > > At line 106: > {code} > while (continueTrying && ++tries < maxTries) { > {code} > should be > {code} > while (continueTrying && ++tries <= maxTries) { > {code} > This is only a problem when called from DistributedUpdateProcessor, as it can > have maxTries set to 1, which means the loop is never actually run. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Resolved] (SOLR-6550) Provide simple mechanism for passing additional metadata / context about a server-side SolrException back to the client-side
[ https://issues.apache.org/jira/browse/SOLR-6550?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Timothy Potter resolved SOLR-6550. -- Resolution: Fixed Fix Version/s: 5.0 > Provide simple mechanism for passing additional metadata / context about a > server-side SolrException back to the client-side > > > Key: SOLR-6550 > URL: https://issues.apache.org/jira/browse/SOLR-6550 > Project: Solr > Issue Type: Improvement > Components: SolrJ >Reporter: Timothy Potter >Assignee: Timothy Potter > Fix For: 5.0 > > Attachments: SOLR-6550.patch > > > While trying to resolve SOLR-6511, it became apparent that I didn't have a > good way to convey more information about a particular error occurring on the > server-side using SolrException. The specific situation I encountered is a > replica took over as leader, but the previous leader wasn't aware of that yet > (due to a Zk session expiration). So when the previous leader (the one that > experienced the Zk session expiration) sent an update request with > FROMLEADER, the new leader rejected the request with a SolrException. > Ideally, we want the new leader to be able to say "you're not the leader > anymore" and for the previous leader to fail the request in a specific way; > see SOLR-6511 for more background on this scenario. > My first inclination was to just extend SolrException and throw a > LeaderChangedException and have the client behave accordingly but then I > discovered that CUSS just takes the status code and error message and > reconstructs a new SolrException (on the client side). HttpSolrServer does > the same thing when creating a RemoteSolrException. So the fact that the > server-side throw a LeaderChangeException is basically lost in translation. > I'm open to other suggestions but here's my approach so far: > Add a {{NamedList metadata}} field to the SolrException class. > If a server-side component wants to add additional context / metadata, then > it will call: {{solrExc.setMetadata("name", "value);}} > When the response is being marshaled into the wire format, ResponseUtils will > include the metadata if available. On the client side, when the response is > processed, the metadata gets included into the new SolrException (in CUSS) or > RemoteSolrException (HttpSolrServer). It's up to the client to dig into the > metadata to take additional steps as I'll be doing in > DistributedUpdateProcessor. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
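The description above spells out the server-side half of the mechanism (solrExc.setMetadata("name", "value")); the client-side half reads that metadata back off the reconstructed exception. The sketch below assumes a getMetadata() accessor and a "solr.leader.changed" key purely for illustration; the committed patch defines the real names.

{code}
// Sketch of the flow described above. setMetadata(String, String) is quoted
// from the issue description; the getMetadata() accessor and the metadata key
// are assumptions made for this illustration.
import org.apache.solr.common.SolrException;

public class LeaderChangeMetadataSketch {
  // Server side: annotate the rejection instead of subclassing SolrException.
  static SolrException rejectStaleLeader() {
    SolrException e = new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE,
        "Request rejected: this node is no longer accepting FROMLEADER updates");
    e.setMetadata("solr.leader.changed", "true"); // hypothetical metadata key
    return e;
  }

  // Client side: decide how to fail based on the metadata that survived the wire.
  static boolean leaderChanged(SolrException remote) {
    return remote.getMetadata() != null
        && "true".equals(remote.getMetadata().get("solr.leader.changed"));
  }
}
{code}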
[jira] [Commented] (SOLR-6564) Fix failing ExitableDirectoryReader tests for Solr
[ https://issues.apache.org/jira/browse/SOLR-6564?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156681#comment-14156681 ] Anshum Gupta commented on SOLR-6564: Thanks for fixing this [~shalinmangar] > Fix failing ExitableDirectoryReader tests for Solr > -- > > Key: SOLR-6564 > URL: https://issues.apache.org/jira/browse/SOLR-6564 > Project: Solr > Issue Type: Test > Components: Tests >Reporter: Anshum Gupta >Assignee: Anshum Gupta > > ExitableDirectoryReader tests are failing as they enumerate over the terms in > less than 1ms (min timeAllowed value that can be set). Need to fix this. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Comment Edited] (SOLR-6460) Keep transaction logs around longer
[ https://issues.apache.org/jira/browse/SOLR-6460?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156672#comment-14156672 ] Renaud Delbru edited comment on SOLR-6460 at 10/2/14 3:47 PM: -- Here is the latest patch which includes an optimisation to reduce the number of opened files and some code cleaning. To summarise, the current patch provides the following: h4. Cleaning of Old Transaction Logs The CdcrUpdateLog removes old tlogs based on pointers instead of a fixed size limit. h4. Log Reader The CdcrUpdateLog provides a log reader with scan and seek operations. A log reader is associated to a log pointer, and is taking care of the life-cycle of the pointer. h4. Log Index To improve the efficiency of the seek operation of the log reader, an index of transaction log files has been added. This index enables to quickly lookup a tlog file based on a version number. This index is implemented by adding a version number to the tlog filename and by leveraging the file system index. This solution was chosen as it was simpler and more robust than managing a separate disk-based index. h4. Number of Opened Files TransactionLog has been extended to automatically (1) close the output stream when its reference count reaches 0, and (2) reopen the output stream on demand. The new tlog (the current tlog being written) is kept open at all times. When a transaction log is pushed to the old tlog list, its reference count is decremented, which might trigger the closing of the output stream. The output stream is reopened in two cases: * during recovery, to write a commit to the end of an uncapped tlog file; * when a log reader is accessing it. At the moment, the logic is split into two classes (TransactionLog and CdcrTransactionLog). We should probably merge the two in the final version. h4. Integration within the UpdateHandler There is a nocommit in the UpdateHandler to force the instantiation of the CdcrUpdateLog instead of the UpdateLog. We need to decide how users will configure this and modify the UpdateHandler appropriately. was (Author: rendel): Here is the latest patch which includes an optimisation to reduce the number of opened files and some code cleaning. To summarise, the current patch provides the following: h4. Cleaning of Old Transaction Logs The CdcrUpdateLog removes old tlogs based on pointers instead of a fixed size limit. h4. Log Reader The CdcrUpdateLog provides a log reader with scan and seek operations. A log reader is associated to a log pointer, and is taking care of the life-cycle of the pointer. h4. Log Index To improve the efficiency of the seek operation of the log reader, an index of transaction log files have been added. This index enables to quickly lookup a tlog file based on a version number. This index is implemented by adding a version number to the tlog filename and by leveraging the file system index. This solution was choosen as it was simpler and more robust than managing a separate disk-based index. h4. Number of Opened Files TransactionLog has been extended to automatically (1) close the output stream when its refeference count reach 0, and (2) reopen the output stream on demand. The new tlog (the current tlog being written) is kept open at all time. When a transaction log is pushed to the old tlog list, its reference count is decremented, which might trigger the closing of the output stream. 
The output stream is reopened in two cases: * during recovery, to write a commit to the end of an uncapped tlog file; * when a log reader is accessing it. At the moment, the logic is split into two classes (TransactionLog and CdcrTransactionLog). We should probably merge the two in the final version. h4. Integration within the UpdateHandler There is a nocommit in the UpdateHandler to force the instantiation of the CdcrUpdateLog instead of the UpdateLog. We need to decide how users will configure this and modify the UpdateHandler appropriately. > Keep transaction logs around longer > --- > > Key: SOLR-6460 > URL: https://issues.apache.org/jira/browse/SOLR-6460 > Project: Solr > Issue Type: Sub-task >Reporter: Yonik Seeley > Attachments: SOLR-6460.patch, SOLR-6460.patch, SOLR-6460.patch > > > Transaction logs are currently deleted relatively quickly... but we need to > keep them around much longer to be used as a source for cross-datacenter > recovery. This will also be useful in the future for enabling peer-sync to > use more historical updates before falling back to replication. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.a
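A minimal, self-contained sketch of the reference-counting scheme described in the SOLR-6460 comment above: the tlog output stream is closed when the count drops to 0 and reopened lazily when a reader or recovery needs it again. This is not the TransactionLog/CdcrTransactionLog code from the patch; all names are illustrative.
{code}
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

/** Illustrative only: close-on-zero-refcount / reopen-on-demand for a tlog output stream. */
class RefCountedLogOutput {
  private final Path tlogFile;
  private OutputStream out;   // null while the file descriptor is released
  private int refCount;

  RefCountedLogOutput(Path tlogFile) {
    this.tlogFile = tlogFile;
  }

  /** Reopen on demand, e.g. when a log reader attaches or recovery appends a commit. */
  synchronized OutputStream incref() throws IOException {
    if (out == null) {
      out = Files.newOutputStream(tlogFile, StandardOpenOption.CREATE, StandardOpenOption.APPEND);
    }
    refCount++;
    return out;
  }

  /** When the count reaches 0 the underlying file descriptor is closed. */
  synchronized void decref() throws IOException {
    if (--refCount == 0 && out != null) {
      out.close();
      out = null;
    }
  }
}
{code}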
[jira] [Commented] (LUCENE-5969) Add Lucene50Codec
[ https://issues.apache.org/jira/browse/LUCENE-5969?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156675#comment-14156675 ] ASF subversion and git services commented on LUCENE-5969: - Commit 1629001 from [~rcmuir] in branch 'dev/branches/lucene5969' [ https://svn.apache.org/r1629001 ] LUCENE-5969: remove back compat > Add Lucene50Codec > - > > Key: LUCENE-5969 > URL: https://issues.apache.org/jira/browse/LUCENE-5969 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Michael McCandless > Fix For: 5.0, Trunk > > Attachments: LUCENE-5969.patch, LUCENE-5969.patch > > > Spinoff from LUCENE-5952: > * Fix .si to write Version as 3 ints, not a String that requires parsing at > read time. > * Lucene42TermVectorsFormat should not use the same codecName as > Lucene41StoredFieldsFormat > It would also be nice if we had a "bumpCodecVersion" script so rolling a new > codec is not so daunting. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-6460) Keep transaction logs around longer
[ https://issues.apache.org/jira/browse/SOLR-6460?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Renaud Delbru updated SOLR-6460: Attachment: SOLR-6460.patch Here is the latest patch which includes an optimisation to reduce the number of opened files and some code cleaning. To summarise, the current patch provides the following: h4. Cleaning of Old Transaction Logs The CdcrUpdateLog removes old tlogs based on pointers instead of a fixed size limit. h4. Log Reader The CdcrUpdateLog provides a log reader with scan and seek operations. A log reader is associated with a log pointer and takes care of the pointer's life-cycle. h4. Log Index To improve the efficiency of the seek operation of the log reader, an index of transaction log files has been added. This index enables quick lookup of a tlog file based on a version number. This index is implemented by adding a version number to the tlog filename and by leveraging the file system index. This solution was chosen as it was simpler and more robust than managing a separate disk-based index. h4. Number of Opened Files TransactionLog has been extended to automatically (1) close the output stream when its reference count reaches 0, and (2) reopen the output stream on demand. The new tlog (the current tlog being written) is kept open at all times. When a transaction log is pushed to the old tlog list, its reference count is decremented, which might trigger the closing of the output stream. The output stream is reopened in two cases: * during recovery, to write a commit to the end of an uncapped tlog file; * when a log reader is accessing it. At the moment, the logic is split into two classes (TransactionLog and CdcrTransactionLog). We should probably merge the two in the final version. h4. Integration within the UpdateHandler There is a nocommit in the UpdateHandler to force the instantiation of the CdcrUpdateLog instead of the UpdateLog. We need to decide how users will configure this and modify the UpdateHandler appropriately. > Keep transaction logs around longer > --- > > Key: SOLR-6460 > URL: https://issues.apache.org/jira/browse/SOLR-6460 > Project: Solr > Issue Type: Sub-task >Reporter: Yonik Seeley > Attachments: SOLR-6460.patch, SOLR-6460.patch, SOLR-6460.patch > > > Transaction logs are currently deleted relatively quickly... but we need to > keep them around much longer to be used as a source for cross-datacenter > recovery. This will also be useful in the future for enabling peer-sync to > use more historical updates before falling back to replication. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
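For the "Log Index" point above, a hedged sketch of what "adding a version number to the tlog filename and leveraging the file system index" can look like. The naming scheme below is an assumption for illustration, not the patch's actual format.
{code}
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Map;
import java.util.TreeMap;

/** Illustrative lookup of a tlog file by version, using only the filenames as the index. */
class TlogVersionIndex {
  // Assumed naming scheme for illustration: tlog.<sequence>.<startVersion>
  static Path findTlogForVersion(Path tlogDir, long wantedVersion) throws IOException {
    TreeMap<Long, Path> byStartVersion = new TreeMap<>();
    try (DirectoryStream<Path> dir = Files.newDirectoryStream(tlogDir, "tlog.*")) {
      for (Path p : dir) {
        String[] parts = p.getFileName().toString().split("\\.");
        byStartVersion.put(Long.parseLong(parts[2]), p);
      }
    }
    // The tlog whose starting version is the greatest one not above the wanted version.
    Map.Entry<Long, Path> entry = byStartVersion.floorEntry(wantedVersion);
    return entry == null ? null : entry.getValue();
  }
}
{code}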
[jira] [Commented] (LUCENE-5969) Add Lucene50Codec
[ https://issues.apache.org/jira/browse/LUCENE-5969?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=1415#comment-1415 ] ASF subversion and git services commented on LUCENE-5969: - Commit 1628996 from [~rcmuir] in branch 'dev/branches/lucene5969' [ https://svn.apache.org/r1628996 ] LUCENE-5969: move CFS to codec > Add Lucene50Codec > - > > Key: LUCENE-5969 > URL: https://issues.apache.org/jira/browse/LUCENE-5969 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Michael McCandless > Fix For: 5.0, Trunk > > Attachments: LUCENE-5969.patch, LUCENE-5969.patch > > > Spinoff from LUCENE-5952: > * Fix .si to write Version as 3 ints, not a String that requires parsing at > read time. > * Lucene42TermVectorsFormat should not use the same codecName as > Lucene41StoredFieldsFormat > It would also be nice if we had a "bumpCodecVersion" script so rolling a new > codec is not so daunting. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-6550) Provide simple mechanism for passing additional metadata / context about a server-side SolrException back to the client-side
[ https://issues.apache.org/jira/browse/SOLR-6550?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156644#comment-14156644 ] ASF subversion and git services commented on SOLR-6550: --- Commit 1628992 from [~thelabdude] in branch 'dev/trunk' [ https://svn.apache.org/r1628992 ] SOLR-6550: mention this fix in other changes section of solr/CHANGES.txt > Provide simple mechanism for passing additional metadata / context about a > server-side SolrException back to the client-side > > > Key: SOLR-6550 > URL: https://issues.apache.org/jira/browse/SOLR-6550 > Project: Solr > Issue Type: Improvement > Components: SolrJ >Reporter: Timothy Potter >Assignee: Timothy Potter > Attachments: SOLR-6550.patch > > > While trying to resolve SOLR-6511, it became apparent that I didn't have a > good way to convey more information about a particular error occurring on the > server-side using SolrException. The specific situation I encountered is that a > replica took over as leader, but the previous leader wasn't aware of that yet > (due to a Zk session expiration). So when the previous leader (the one that > experienced the Zk session expiration) sent an update request with > FROMLEADER, the new leader rejected the request with a SolrException. > Ideally, we want the new leader to be able to say "you're not the leader > anymore" and for the previous leader to fail the request in a specific way; > see SOLR-6511 for more background on this scenario. > My first inclination was to just extend SolrException and throw a > LeaderChangedException and have the client behave accordingly but then I > discovered that CUSS just takes the status code and error message and > reconstructs a new SolrException (on the client side). HttpSolrServer does > the same thing when creating a RemoteSolrException. So the fact that the > server side throws a LeaderChangedException is basically lost in translation. > I'm open to other suggestions but here's my approach so far: > Add a {{NamedList metadata}} field to the SolrException class. > If a server-side component wants to add additional context / metadata, then > it will call: {{solrExc.setMetadata("name", "value");}} > When the response is being marshaled into the wire format, ResponseUtils will > include the metadata if available. On the client side, when the response is > processed, the metadata gets included into the new SolrException (in CUSS) or > RemoteSolrException (HttpSolrServer). It's up to the client to dig into the > metadata to take additional steps as I'll be doing in > DistributedUpdateProcessor. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
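A hedged sketch of the proposal as described: the {{setMetadata}} call is quoted from the issue description, while the getter name and the client-side check are assumptions for illustration only.
{code}
import org.apache.solr.common.SolrException;
import org.apache.solr.common.util.NamedList;

public class LeaderChangedMetadataSketch {
  // Server side: attach extra context before the exception is marshaled.
  static SolrException buildLeaderChangedError() {
    SolrException e = new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE,
        "leader changed while processing FROMLEADER update");
    // setMetadata(name, value) is the call quoted in the issue description.
    e.setMetadata("cause", "LeaderChanged");
    return e;
  }

  // Client side: inspect the metadata instead of the exception class, since
  // CUSS / HttpSolrServer rebuild a generic (Remote)SolrException.
  static boolean isLeaderChanged(SolrException remote) {
    NamedList<String> md = remote.getMetadata(); // assumed accessor for the new field
    return md != null && "LeaderChanged".equals(md.get("cause"));
  }
}
{code}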
[jira] [Commented] (SOLR-6550) Provide simple mechanism for passing additional metadata / context about a server-side SolrException back to the client-side
[ https://issues.apache.org/jira/browse/SOLR-6550?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156642#comment-14156642 ] ASF subversion and git services commented on SOLR-6550: --- Commit 1628991 from [~thelabdude] in branch 'dev/branches/branch_5x' [ https://svn.apache.org/r1628991 ] SOLR-6550: mention this fix in other changes section of solr/CHANGES.txt > Provide simple mechanism for passing additional metadata / context about a > server-side SolrException back to the client-side > > > Key: SOLR-6550 > URL: https://issues.apache.org/jira/browse/SOLR-6550 > Project: Solr > Issue Type: Improvement > Components: SolrJ >Reporter: Timothy Potter >Assignee: Timothy Potter > Attachments: SOLR-6550.patch > > > While trying to resolve SOLR-6511, it became apparent that I didn't have a > good way to convey more information about a particular error occurring on the > server-side using SolrException. The specific situation I encountered is that a > replica took over as leader, but the previous leader wasn't aware of that yet > (due to a Zk session expiration). So when the previous leader (the one that > experienced the Zk session expiration) sent an update request with > FROMLEADER, the new leader rejected the request with a SolrException. > Ideally, we want the new leader to be able to say "you're not the leader > anymore" and for the previous leader to fail the request in a specific way; > see SOLR-6511 for more background on this scenario. > My first inclination was to just extend SolrException and throw a > LeaderChangedException and have the client behave accordingly but then I > discovered that CUSS just takes the status code and error message and > reconstructs a new SolrException (on the client side). HttpSolrServer does > the same thing when creating a RemoteSolrException. So the fact that the > server side throws a LeaderChangedException is basically lost in translation. > I'm open to other suggestions but here's my approach so far: > Add a {{NamedList metadata}} field to the SolrException class. > If a server-side component wants to add additional context / metadata, then > it will call: {{solrExc.setMetadata("name", "value");}} > When the response is being marshaled into the wire format, ResponseUtils will > include the metadata if available. On the client side, when the response is > processed, the metadata gets included into the new SolrException (in CUSS) or > RemoteSolrException (HttpSolrServer). It's up to the client to dig into the > metadata to take additional steps as I'll be doing in > DistributedUpdateProcessor. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-6550) Provide simple mechanism for passing additional metadata / context about a server-side SolrException back to the client-side
[ https://issues.apache.org/jira/browse/SOLR-6550?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156639#comment-14156639 ] ASF subversion and git services commented on SOLR-6550: --- Commit 1628988 from [~thelabdude] in branch 'dev/branches/branch_5x' [ https://svn.apache.org/r1628988 ] SOLR-6550: Provide simple mechanism for passing additional metadata / context about a server-side SolrException back to the client-side > Provide simple mechanism for passing additional metadata / context about a > server-side SolrException back to the client-side > > > Key: SOLR-6550 > URL: https://issues.apache.org/jira/browse/SOLR-6550 > Project: Solr > Issue Type: Improvement > Components: SolrJ >Reporter: Timothy Potter >Assignee: Timothy Potter > Attachments: SOLR-6550.patch > > > While trying to resolve SOLR-6511, it became apparent that I didn't have a > good way to convey more information about a particular error occurring on the > server-side using SolrException. The specific situation I encountered is that a > replica took over as leader, but the previous leader wasn't aware of that yet > (due to a Zk session expiration). So when the previous leader (the one that > experienced the Zk session expiration) sent an update request with > FROMLEADER, the new leader rejected the request with a SolrException. > Ideally, we want the new leader to be able to say "you're not the leader > anymore" and for the previous leader to fail the request in a specific way; > see SOLR-6511 for more background on this scenario. > My first inclination was to just extend SolrException and throw a > LeaderChangedException and have the client behave accordingly but then I > discovered that CUSS just takes the status code and error message and > reconstructs a new SolrException (on the client side). HttpSolrServer does > the same thing when creating a RemoteSolrException. So the fact that the > server side throws a LeaderChangedException is basically lost in translation. > I'm open to other suggestions but here's my approach so far: > Add a {{NamedList metadata}} field to the SolrException class. > If a server-side component wants to add additional context / metadata, then > it will call: {{solrExc.setMetadata("name", "value");}} > When the response is being marshaled into the wire format, ResponseUtils will > include the metadata if available. On the client side, when the response is > processed, the metadata gets included into the new SolrException (in CUSS) or > RemoteSolrException (HttpSolrServer). It's up to the client to dig into the > metadata to take additional steps as I'll be doing in > DistributedUpdateProcessor. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-6511) Fencepost error in LeaderInitiatedRecoveryThread
[ https://issues.apache.org/jira/browse/SOLR-6511?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156641#comment-14156641 ] ASF subversion and git services commented on SOLR-6511: --- Commit 1628989 from [~thelabdude] in branch 'dev/branches/branch_5x' [ https://svn.apache.org/r1628989 ] SOLR-6511: Fencepost error in LeaderInitiatedRecoveryThread; refactor HttpPartitionTest to resolve jenkins failures. > Fencepost error in LeaderInitiatedRecoveryThread > > > Key: SOLR-6511 > URL: https://issues.apache.org/jira/browse/SOLR-6511 > Project: Solr > Issue Type: Bug >Reporter: Alan Woodward >Assignee: Timothy Potter > Attachments: SOLR-6511.patch, SOLR-6511.patch > > > At line 106: > {code} > while (continueTrying && ++tries < maxTries) { > {code} > should be > {code} > while (continueTrying && ++tries <= maxTries) { > {code} > This is only a problem when called from DistributedUpdateProcessor, as it can > have maxTries set to 1, which means the loop is never actually run. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
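A tiny standalone demonstration of the off-by-one described above: with maxTries set to 1, the {{<}} form never enters the loop, while the {{<=}} form runs it exactly once.
{code}
public class FencepostDemo {
  public static void main(String[] args) {
    int maxTries = 1;   // the value DistributedUpdateProcessor can pass

    int tries = 0;
    int runsWithLt = 0;
    while (++tries < maxTries) {    // buggy form: 1 < 1 is false, body never runs
      runsWithLt++;
    }

    tries = 0;
    int runsWithLe = 0;
    while (++tries <= maxTries) {   // fixed form: 1 <= 1, body runs exactly once
      runsWithLe++;
    }

    System.out.println("with '<'  the loop ran " + runsWithLt + " times");
    System.out.println("with '<=' the loop ran " + runsWithLe + " times");
  }
}
{code}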
[JENKINS] Lucene-Solr-trunk-Linux (64bit/jdk1.8.0_40-ea-b04) - Build # 11370 - Still Failing!
Build: http://jenkins.thetaphi.de/job/Lucene-Solr-trunk-Linux/11370/ Java: 64bit/jdk1.8.0_40-ea-b04 -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC 1 tests failed. FAILED: org.apache.solr.cloud.LeaderInitiatedRecoveryOnCommitTest.testDistribSearch Error Message: Server refused connection at: http://127.0.0.1:39565 Stack Trace: org.apache.solr.client.solrj.SolrServerException: Server refused connection at: http://127.0.0.1:39565 at __randomizedtesting.SeedInfo.seed([3C3623CE94700F57:BDD0ADD6E32F6F6B]:0) at org.apache.solr.client.solrj.impl.HttpSolrServer.executeMethod(HttpSolrServer.java:578) at org.apache.solr.client.solrj.impl.HttpSolrServer.request(HttpSolrServer.java:215) at org.apache.solr.client.solrj.impl.HttpSolrServer.request(HttpSolrServer.java:211) at org.apache.solr.cloud.BasicDistributedZkTest.createCollection(BasicDistributedZkTest.java:612) at org.apache.solr.cloud.AbstractFullDistribZkTestBase.createCollection(AbstractFullDistribZkTestBase.java:1579) at org.apache.solr.cloud.LeaderInitiatedRecoveryOnCommitTest.multiShardTest(LeaderInitiatedRecoveryOnCommitTest.java:77) at org.apache.solr.cloud.LeaderInitiatedRecoveryOnCommitTest.doTest(LeaderInitiatedRecoveryOnCommitTest.java:71) at org.apache.solr.BaseDistributedSearchTestCase.testDistribSearch(BaseDistributedSearchTestCase.java:869) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:483) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836) at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738) at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783) at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAsserti
[jira] [Updated] (SOLR-6580) facet(.query) responses duplicated
[ https://issues.apache.org/jira/browse/SOLR-6580?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Erik Hatcher updated SOLR-6580: --- Description: I uncommented the invariants of the standard request handler commented out in the default example solrconfig.xml, restarted Solr, and made this request {{http://localhost:8983/solr/collection1/select?q=*:*&facet=on&facet.query=foo&rows=0}} and got duplicate responses back for the invariant price range facet.query's (but no facet.query response for the query string provided one, as expected): {code} 14 2 14 2 {code} was:I uncommented the invariants of the standard request handler commented out in the default example solrconfig.xml, restarted Solr, and made this request {{http://localhost:8983/solr/collection1/select?q=*:*&facet=on&facet.query=foo&rows=0}} and got duplicate responses back for the invariant price range facet.query's (but no facet.query response for the query string provided one, as expected). > facet(.query) responses duplicated > -- > > Key: SOLR-6580 > URL: https://issues.apache.org/jira/browse/SOLR-6580 > Project: Solr > Issue Type: Bug >Affects Versions: 4.10, 4.10.1 >Reporter: Erik Hatcher > Fix For: 5.0, Trunk > > > I uncommented the invariants of the standard request handler commented out in > the default example solrconfig.xml, restarted Solr, and made this request > {{http://localhost:8983/solr/collection1/select?q=*:*&facet=on&facet.query=foo&rows=0}} > and got duplicate responses back for the invariant price range facet.query's > (but no facet.query response for the query string provided one, as expected): > {code} > > 14 > 2 > 14 > 2 > > {code} -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
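A hedged SolrJ equivalent of the request URL in the description above; the core name and response handling are illustrative, and a client-side map will collapse the duplicated entries that appear in the raw XML response.
{code}
import java.util.Map;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;

public class FacetQueryDuplicationRepro {
  public static void main(String[] args) throws Exception {
    HttpSolrServer server = new HttpSolrServer("http://localhost:8983/solr/collection1");
    SolrQuery q = new SolrQuery("*:*");
    q.setFacet(true);
    q.addFacetQuery("foo");   // the request-time facet.query from the report
    q.setRows(0);
    QueryResponse rsp = server.query(q);
    // With the handler's invariant facet.query values enabled, the raw XML response
    // listed each invariant facet.query count twice (see the description above).
    Map<String, Integer> facetQueries = rsp.getFacetQuery();
    System.out.println(facetQueries);
    server.shutdown();
  }
}
{code}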
[jira] [Commented] (SOLR-6580) facet(.query) responses duplicated
[ https://issues.apache.org/jira/browse/SOLR-6580?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156625#comment-14156625 ] Erik Hatcher commented on SOLR-6580: Looks to be a regression introduced in SOLR-6314 > facet(.query) responses duplicated > -- > > Key: SOLR-6580 > URL: https://issues.apache.org/jira/browse/SOLR-6580 > Project: Solr > Issue Type: Bug >Affects Versions: 4.10, 4.10.1 >Reporter: Erik Hatcher > Fix For: 5.0, Trunk > > > I uncommented the invariants of the standard request handler commented out in > the default example solrconfig.xml, restarted Solr, and made this request > {{http://localhost:8983/solr/collection1/select?q=*:*&facet=on&facet.query=foo&rows=0}} > and got duplicate responses back for the invariant price range facet.query's > (but no facet.query response for the query string provided one, as expected): > {code} > > 14 > 2 > 14 > 2 > > {code} -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-5064) Update to a more recent version.
[ https://issues.apache.org/jira/browse/SOLR-5064?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156622#comment-14156622 ] Sébastien Deleuze commented on SOLR-5064: - Hi, It would be nice to update your Woodstox version since the artifactId has changed (wstx-asl to woodstox-core-asl); even when someone wants to use the latest org.codehaus.woodstox:woodstox-core-asl:4.4.1 by defining this dependency explicitly, they get both woodstox-core-asl and wstx-asl in the classpath. I am currently adding support for Jackson based XML serialization to Spring Boot. This feature requires the latest woodstox-core-asl version, but since Spring Boot and Spring Data integrate solr-solrj, we have to exclude wstx-asl and manually add a dependency on woodstox-core-asl, which is not very nice. Thanks in advance for your help, Sébastien > Update transitive="false"/> to a more recent version. > - > > Key: SOLR-5064 > URL: https://issues.apache.org/jira/browse/SOLR-5064 > Project: Solr > Issue Type: Improvement >Reporter: Mark Miller >Assignee: Mark Miller >Priority: Minor > Fix For: 4.9, Trunk > > > @whoschek mentioned to me earlier that we were using a fairly old version. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Created] (SOLR-6580) facet(.query) responses duplicated
Erik Hatcher created SOLR-6580: -- Summary: facet(.query) responses duplicated Key: SOLR-6580 URL: https://issues.apache.org/jira/browse/SOLR-6580 Project: Solr Issue Type: Bug Affects Versions: 4.10.1, 4.10 Reporter: Erik Hatcher Fix For: 5.0, Trunk I uncommented the invariants of the standard request handler commented out in the default example solrconfig.xml, restarted Solr, and made this request {{http://localhost:8983/solr/collection1/select?q=*:*&facet=on&facet.query=foo&rows=0}} and got duplicate responses back for the invariant price range facet.query's (but no facet.query response for the query string provided one, as expected). -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-6579) SnapPuller Replication blocks clean shutdown of tomcat
[ https://issues.apache.org/jira/browse/SOLR-6579?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Philip Black-Knight updated SOLR-6579: -- Attachment: cleanupSnapPullerFinally.patch > SnapPuller Replication blocks clean shutdown of tomcat > -- > > Key: SOLR-6579 > URL: https://issues.apache.org/jira/browse/SOLR-6579 > Project: Solr > Issue Type: Bug >Affects Versions: 4.10.1 >Reporter: Philip Black-Knight > Attachments: cleanupSnapPullerFinally.patch > > > main issue was described in the mailing list her: > http://mail-archives.apache.org/mod_mbox/lucene-solr-user/201409.mbox/browser > and > here: > but also including the quotes: > original message from Nick > {quote} > Hello, > I have solr 4.10 running on tomcat 7. I'm doing replication from one master > to about 10 slaves, with standard configuration: > {code} > > > ${enable.master:false} > commit > startup > schema.xml,stopwords.txt > > > > ${enable.slave:false} > http://master:8080/solr/mycore > 00:00:60 > > > {code} > It appears that if tomcat gets shutdown while solr is replicating, it > prevents tomcat from shutting down fully. Immediately after receiving the > shutdown command, a thread dump is logged into catalina.out (this may have > been turned on by some configuration someone else on my team made). I > removed some threads that didn't look related, mostly about tomcat session > replication, or with names like "http-bio-8080-exec-10". > {code} > 62252 [http-bio-8080-exec-1] INFO org.apache.solr.core.SolrCore – > [mycore] webapp=/solr path=/replication > params={command=details&_=1412014928648&wt=json} status=0 QTime=6 > 63310 [http-bio-8080-exec-1] INFO org.apache.solr.core.SolrCore – > [mycore] webapp=/solr path=/replication > params={command=details&_=1412014929699&wt=json} status=0 QTime=6 > 2014-09-29 14:22:10 > Full thread dump Java HotSpot(TM) 64-Bit Server VM (24.65-b04 mixed > mode): > "fsyncService-12-thread-1" prio=10 tid=0x7f3bd4002000 nid=0x203d > waiting on condition [0x7f3c271f] >java.lang.Thread.State: WAITING (parking) > at sun.misc.Unsafe.park(Native Method) > - parking to wait for <0x0007e1ff4458> (a > java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject) > at > java.util.concurrent.locks.LockSupport.park(LockSupport.java:186) > at > java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2043) > at > java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442) > at > java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1068) > at > java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1130) > at > java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) > at java.lang.Thread.run(Thread.java:745) > "explicit-fetchindex-cmd" daemon prio=10 tid=0x7f3c0413e800 > nid=0x203c runnable [0x7f3c272f1000] >java.lang.Thread.State: RUNNABLE > at java.net.SocketInputStream.socketRead0(Native Method) > at java.net.SocketInputStream.read(SocketInputStream.java:152) > at java.net.SocketInputStream.read(SocketInputStream.java:122) > at > org.apache.http.impl.io.AbstractSessionInputBuffer.read(AbstractSessionInputBuffer.java:198) > at > org.apache.http.impl.io.ChunkedInputStream.read(ChunkedInputStream.java:174) > at > org.apache.http.conn.EofSensorInputStream.read(EofSensorInputStream.java:137) > at > org.apache.solr.common.util.FastInputStream.readWrappedStream(FastInputStream.java:80) > at > 
org.apache.solr.common.util.FastInputStream.read(FastInputStream.java:114) > at > org.apache.solr.common.util.FastInputStream.readFully(FastInputStream.java:152) > at > org.apache.solr.handler.SnapPuller$DirectoryFileFetcher.fetchPackets(SnapPuller.java:1239) > at > org.apache.solr.handler.SnapPuller$DirectoryFileFetcher.fetchFile(SnapPuller.java:1187) > at > org.apache.solr.handler.SnapPuller.downloadIndexFiles(SnapPuller.java:774) > at > org.apache.solr.handler.SnapPuller.fetchLatestIndex(SnapPuller.java:424) > at > org.apache.solr.handler.ReplicationHandler.doFetch(ReplicationHandler.java:337) > at > org.apache.solr.handler.ReplicationHandler$1.run(ReplicationHandler.java:227) > "process reaper" daemon prio=10 tid=0x7f3c0409c000 nid=0x203b > waiting on condition [0x7f3c984e9000] >
[jira] [Created] (SOLR-6579) SnapPuller Replication blocks clean shutdown of tomcat
Philip Black-Knight created SOLR-6579: - Summary: SnapPuller Replication blocks clean shutdown of tomcat Key: SOLR-6579 URL: https://issues.apache.org/jira/browse/SOLR-6579 Project: Solr Issue Type: Bug Affects Versions: 4.10.1 Reporter: Philip Black-Knight main issue was described in the mailing list her: http://mail-archives.apache.org/mod_mbox/lucene-solr-user/201409.mbox/browser and here: but also including the quotes: original message from Nick {quote} Hello, I have solr 4.10 running on tomcat 7. I'm doing replication from one master to about 10 slaves, with standard configuration: {code} ${enable.master:false} commit startup schema.xml,stopwords.txt ${enable.slave:false} http://master:8080/solr/mycore 00:00:60 {code} It appears that if tomcat gets shutdown while solr is replicating, it prevents tomcat from shutting down fully. Immediately after receiving the shutdown command, a thread dump is logged into catalina.out (this may have been turned on by some configuration someone else on my team made). I removed some threads that didn't look related, mostly about tomcat session replication, or with names like "http-bio-8080-exec-10". {code} 62252 [http-bio-8080-exec-1] INFO org.apache.solr.core.SolrCore – [mycore] webapp=/solr path=/replication params={command=details&_=1412014928648&wt=json} status=0 QTime=6 63310 [http-bio-8080-exec-1] INFO org.apache.solr.core.SolrCore – [mycore] webapp=/solr path=/replication params={command=details&_=1412014929699&wt=json} status=0 QTime=6 2014-09-29 14:22:10 Full thread dump Java HotSpot(TM) 64-Bit Server VM (24.65-b04 mixed mode): "fsyncService-12-thread-1" prio=10 tid=0x7f3bd4002000 nid=0x203d waiting on condition [0x7f3c271f] java.lang.Thread.State: WAITING (parking) at sun.misc.Unsafe.park(Native Method) - parking to wait for <0x0007e1ff4458> (a java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject) at java.util.concurrent.locks.LockSupport.park(LockSupport.java:186) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2043) at java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442) at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1068) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1130) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) at java.lang.Thread.run(Thread.java:745) "explicit-fetchindex-cmd" daemon prio=10 tid=0x7f3c0413e800 nid=0x203c runnable [0x7f3c272f1000] java.lang.Thread.State: RUNNABLE at java.net.SocketInputStream.socketRead0(Native Method) at java.net.SocketInputStream.read(SocketInputStream.java:152) at java.net.SocketInputStream.read(SocketInputStream.java:122) at org.apache.http.impl.io.AbstractSessionInputBuffer.read(AbstractSessionInputBuffer.java:198) at org.apache.http.impl.io.ChunkedInputStream.read(ChunkedInputStream.java:174) at org.apache.http.conn.EofSensorInputStream.read(EofSensorInputStream.java:137) at org.apache.solr.common.util.FastInputStream.readWrappedStream(FastInputStream.java:80) at org.apache.solr.common.util.FastInputStream.read(FastInputStream.java:114) at org.apache.solr.common.util.FastInputStream.readFully(FastInputStream.java:152) at org.apache.solr.handler.SnapPuller$DirectoryFileFetcher.fetchPackets(SnapPuller.java:1239) at org.apache.solr.handler.SnapPuller$DirectoryFileFetcher.fetchFile(SnapPuller.java:1187) at org.apache.solr.handler.SnapPuller.downloadIndexFiles(SnapPuller.java:774) at 
org.apache.solr.handler.SnapPuller.fetchLatestIndex(SnapPuller.java:424) at org.apache.solr.handler.ReplicationHandler.doFetch(ReplicationHandler.java:337) at org.apache.solr.handler.ReplicationHandler$1.run(ReplicationHandler.java:227) "process reaper" daemon prio=10 tid=0x7f3c0409c000 nid=0x203b waiting on condition [0x7f3c984e9000] java.lang.Thread.State: TIMED_WAITING (parking) at sun.misc.Unsafe.park(Native Method) - parking to wait for <0x0007dfbfd890> (a java.util.concurrent.SynchronousQueue$TransferStack) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:226) at java.util.concurrent.SynchronousQueue$TransferStack.awaitFulfill(SynchronousQueue.java:460) at java.util.concurrent.SynchronousQu
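This is not the attached patch, only a generic sketch of the "cleanup in finally" pattern its filename suggests: a long-running fetch loop that checks an abort flag and releases its resources in a finally block so that a container shutdown is not blocked on a socket read. All names are hypothetical.
{code}
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

/** Hypothetical fetcher illustrating cooperative abort plus cleanup in a finally block. */
class AbortableIndexFetcher {
  private final AtomicBoolean stopped = new AtomicBoolean(false);

  /** Called from the container's shutdown path. */
  void abort() {
    stopped.set(true);
  }

  void fetchIndexFiles(Iterable<String> files, Closeable connection) throws IOException {
    try {
      for (String file : files) {
        if (stopped.get()) {
          // Stop pulling packets instead of blocking shutdown on a socket read.
          break;
        }
        downloadOneFile(file);
      }
    } finally {
      // Release the network/directory resources even on abort or error.
      connection.close();
    }
  }

  private void downloadOneFile(String file) {
    // placeholder for the per-file packet fetch loop
  }
}
{code}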
[jira] [Commented] (LUCENE-5879) Add auto-prefix terms to block tree terms dict
[ https://issues.apache.org/jira/browse/LUCENE-5879?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156579#comment-14156579 ] Michael McCandless commented on LUCENE-5879: OK I'll add a comment explaining it... > Add auto-prefix terms to block tree terms dict > -- > > Key: LUCENE-5879 > URL: https://issues.apache.org/jira/browse/LUCENE-5879 > Project: Lucene - Core > Issue Type: New Feature > Components: core/codecs >Reporter: Michael McCandless >Assignee: Michael McCandless > Fix For: 5.0, Trunk > > Attachments: LUCENE-5879.patch, LUCENE-5879.patch, LUCENE-5879.patch, > LUCENE-5879.patch, LUCENE-5879.patch, LUCENE-5879.patch, LUCENE-5879.patch, > LUCENE-5879.patch, LUCENE-5879.patch > > > This cool idea to generalize numeric/trie fields came from Adrien: > Today, when we index a numeric field (LongField, etc.) we pre-compute > (via NumericTokenStream) outside of indexer/codec which prefix terms > should be indexed. > But this can be inefficient: you set a static precisionStep, and > always add those prefix terms regardless of how the terms in the field > are actually distributed. Yet typically in real world applications > the terms have a non-random distribution. > So, it should be better if instead the terms dict decides where it > makes sense to insert prefix terms, based on how dense the terms are > in each region of term space. > This way we can speed up query time for both term (e.g. infix > suggester) and numeric ranges, and it should let us use less index > space and get faster range queries. > > This would also mean that min/maxTerm for a numeric field would now be > correct, vs today where the externally computed prefix terms are > placed after the full precision terms, causing hairy code like > NumericUtils.getMaxInt/Long. So optos like LUCENE-5860 become > feasible. > The terms dict can also do tricks not possible if you must live on top > of its APIs, e.g. to handle the adversary/over-constrained case when a > given prefix has too many terms following it but finer prefixes > have too few (what block tree calls "floor term blocks"). -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-5.x-Linux (64bit/jdk1.8.0_40-ea-b04) - Build # 11217 - Still Failing!
Build: http://jenkins.thetaphi.de/job/Lucene-Solr-5.x-Linux/11217/ Java: 64bit/jdk1.8.0_40-ea-b04 -XX:-UseCompressedOops -XX:+UseParallelGC 2 tests failed. REGRESSION: org.apache.solr.cloud.DeleteLastCustomShardedReplicaTest.testDistribSearch Error Message: No live SolrServers available to handle this request:[http://127.0.0.1:54363, http://127.0.0.1:54959, http://127.0.0.1:38326] Stack Trace: org.apache.solr.client.solrj.SolrServerException: No live SolrServers available to handle this request:[http://127.0.0.1:54363, http://127.0.0.1:54959, http://127.0.0.1:38326] at __randomizedtesting.SeedInfo.seed([6D5078760DF5352C:ECB6F66E7AAA5510]:0) at org.apache.solr.client.solrj.impl.LBHttpSolrServer.request(LBHttpSolrServer.java:322) at org.apache.solr.client.solrj.impl.CloudSolrServer.sendRequest(CloudSolrServer.java:880) at org.apache.solr.client.solrj.impl.CloudSolrServer.requestWithRetryOnStaleState(CloudSolrServer.java:658) at org.apache.solr.client.solrj.impl.CloudSolrServer.request(CloudSolrServer.java:601) at org.apache.solr.cloud.DeleteLastCustomShardedReplicaTest.removeAndWaitForLastReplicaGone(DeleteLastCustomShardedReplicaTest.java:117) at org.apache.solr.cloud.DeleteLastCustomShardedReplicaTest.doTest(DeleteLastCustomShardedReplicaTest.java:107) at org.apache.solr.BaseDistributedSearchTestCase.testDistribSearch(BaseDistributedSearchTestCase.java:869) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:483) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836) at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738) at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783) at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:3
[jira] [Commented] (LUCENE-5879) Add auto-prefix terms to block tree terms dict
[ https://issues.apache.org/jira/browse/LUCENE-5879?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156560#comment-14156560 ] Robert Muir commented on LUCENE-5879: - I think its enough to just add a comment to explain what is happening. Its similar to seeing a "BitsetHashMap" or something. people are going to be very confused unless they have the 'rewind' explanation above. > Add auto-prefix terms to block tree terms dict > -- > > Key: LUCENE-5879 > URL: https://issues.apache.org/jira/browse/LUCENE-5879 > Project: Lucene - Core > Issue Type: New Feature > Components: core/codecs >Reporter: Michael McCandless >Assignee: Michael McCandless > Fix For: 5.0, Trunk > > Attachments: LUCENE-5879.patch, LUCENE-5879.patch, LUCENE-5879.patch, > LUCENE-5879.patch, LUCENE-5879.patch, LUCENE-5879.patch, LUCENE-5879.patch, > LUCENE-5879.patch, LUCENE-5879.patch > > > This cool idea to generalize numeric/trie fields came from Adrien: > Today, when we index a numeric field (LongField, etc.) we pre-compute > (via NumericTokenStream) outside of indexer/codec which prefix terms > should be indexed. > But this can be inefficient: you set a static precisionStep, and > always add those prefix terms regardless of how the terms in the field > are actually distributed. Yet typically in real world applications > the terms have a non-random distribution. > So, it should be better if instead the terms dict decides where it > makes sense to insert prefix terms, based on how dense the terms are > in each region of term space. > This way we can speed up query time for both term (e.g. infix > suggester) and numeric ranges, and it should let us use less index > space and get faster range queries. > > This would also mean that min/maxTerm for a numeric field would now be > correct, vs today where the externally computed prefix terms are > placed after the full precision terms, causing hairy code like > NumericUtils.getMaxInt/Long. So optos like LUCENE-5860 become > feasible. > The terms dict can also do tricks not possible if you must live on top > of its APIs, e.g. to handle the adversary/over-constrained case when a > given prefix has too many terms following it but finer prefixes > have too few (what block tree calls "floor term blocks"). -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-5879) Add auto-prefix terms to block tree terms dict
[ https://issues.apache.org/jira/browse/LUCENE-5879?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156547#comment-14156547 ] Michael McCandless commented on LUCENE-5879: Maybe we can somehow change the PostingsWriterBase API, so it only gets a "thingy that lets you pull docs/docsAndPositions as many times as you want" ... but I don't think that should block committing here? > Add auto-prefix terms to block tree terms dict > -- > > Key: LUCENE-5879 > URL: https://issues.apache.org/jira/browse/LUCENE-5879 > Project: Lucene - Core > Issue Type: New Feature > Components: core/codecs >Reporter: Michael McCandless >Assignee: Michael McCandless > Fix For: 5.0, Trunk > > Attachments: LUCENE-5879.patch, LUCENE-5879.patch, LUCENE-5879.patch, > LUCENE-5879.patch, LUCENE-5879.patch, LUCENE-5879.patch, LUCENE-5879.patch, > LUCENE-5879.patch, LUCENE-5879.patch > > > This cool idea to generalize numeric/trie fields came from Adrien: > Today, when we index a numeric field (LongField, etc.) we pre-compute > (via NumericTokenStream) outside of indexer/codec which prefix terms > should be indexed. > But this can be inefficient: you set a static precisionStep, and > always add those prefix terms regardless of how the terms in the field > are actually distributed. Yet typically in real world applications > the terms have a non-random distribution. > So, it should be better if instead the terms dict decides where it > makes sense to insert prefix terms, based on how dense the terms are > in each region of term space. > This way we can speed up query time for both term (e.g. infix > suggester) and numeric ranges, and it should let us use less index > space and get faster range queries. > > This would also mean that min/maxTerm for a numeric field would now be > correct, vs today where the externally computed prefix terms are > placed after the full precision terms, causing hairy code like > NumericUtils.getMaxInt/Long. So optos like LUCENE-5860 become > feasible. > The terms dict can also do tricks not possible if you must live on top > of its APIs, e.g. to handle the adversary/over-constrained case when a > given prefix has too many terms following it but finer prefixes > have too few (what block tree calls "floor term blocks"). -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-trunk-Windows (32bit/jdk1.8.0_20) - Build # 4349 - Failure!
Build: http://jenkins.thetaphi.de/job/Lucene-Solr-trunk-Windows/4349/ Java: 32bit/jdk1.8.0_20 -client -XX:+UseSerialGC 1 tests failed. REGRESSION: org.apache.solr.SolrInfoMBeanTest.testCallMBeanInfo Error Message: Stack Trace: java.lang.AssertionError at __randomizedtesting.SeedInfo.seed([BA0038D9343EAE8A:4566B5E55F46D394]:0) at org.junit.Assert.fail(Assert.java:92) at org.junit.Assert.assertTrue(Assert.java:43) at org.junit.Assert.assertNotNull(Assert.java:526) at org.junit.Assert.assertNotNull(Assert.java:537) at org.apache.solr.SolrInfoMBeanTest.testCallMBeanInfo(SolrInfoMBeanTest.java:66) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:483) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836) at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738) at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:43) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:55) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at java.lang.Thread.run(Thread.java:745) Build Log: [...truncated 10975 lines...] [junit4] Suite: org.apache.solr.SolrInfoMBeanTest [junit4]
[JENKINS] Lucene-Solr-NightlyTests-trunk - Build # 645 - Still Failing
Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-trunk/645/ 2 tests failed. REGRESSION: org.apache.solr.SolrInfoMBeanTest.testCallMBeanInfo Error Message: Stack Trace: java.lang.AssertionError at __randomizedtesting.SeedInfo.seed([29DDB92365BE42DE:D6BB341F0EC63FC0]:0) at org.junit.Assert.fail(Assert.java:92) at org.junit.Assert.assertTrue(Assert.java:43) at org.junit.Assert.assertNotNull(Assert.java:526) at org.junit.Assert.assertNotNull(Assert.java:537) at org.apache.solr.SolrInfoMBeanTest.testCallMBeanInfo(SolrInfoMBeanTest.java:66) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836) at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738) at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:43) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:55) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at java.lang.Thread.run(Thread.java:745) FAILED: org.apache.solr.cloud.CollectionsAPIDistributedZkTest.testDistribSearch Error Message: Error CREATEing SolrCore 'halfcollection_shard1_replica1
[JENKINS] Lucene-Solr-trunk-Linux (32bit/jdk1.9.0-ea-b28) - Build # 11369 - Still Failing!
Build: http://jenkins.thetaphi.de/job/Lucene-Solr-trunk-Linux/11369/ Java: 32bit/jdk1.9.0-ea-b28 -client -XX:+UseParallelGC 1 tests failed. FAILED: org.apache.solr.SolrInfoMBeanTest.testCallMBeanInfo Error Message: Stack Trace: java.lang.AssertionError at __randomizedtesting.SeedInfo.seed([9AD1764882BD6FFD:65B7FB74E9C512E3]:0) at org.junit.Assert.fail(Assert.java:92) at org.junit.Assert.assertTrue(Assert.java:43) at org.junit.Assert.assertNotNull(Assert.java:526) at org.junit.Assert.assertNotNull(Assert.java:537) at org.apache.solr.SolrInfoMBeanTest.testCallMBeanInfo(SolrInfoMBeanTest.java:66) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:484) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836) at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738) at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:43) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:55) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at java.lang.Thread.run(Thread.java:745) Build Log: [...truncated 11398 lines...] [junit4] Suite: org.apache.solr.SolrInfoMBeanTest [junit4
[jira] [Commented] (SOLR-6562) Function query calculates the wrong value
[ https://issues.apache.org/jira/browse/SOLR-6562?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156446#comment-14156446 ] Stefan Neumüller commented on SOLR-6562: I found a way to add hours to a date without too much precision loss: I use ms() to subtract a "negative" date offset by the desired number of hours. For example, to add 4 hours to mydate I write: ms(mydate,1969-12-31T20:00:00.000Z). > Function query calculates the wrong value > - > > Key: SOLR-6562 > URL: https://issues.apache.org/jira/browse/SOLR-6562 > Project: Solr > Issue Type: Bug >Affects Versions: 4.9 >Reporter: Stefan Neumüller >Priority: Critical > > This calculation > fl=sub(sum(abs(sub(1416906516710,141678360)),abs(sub(1036800,1416906516710))),10226321640) > should return 0. But the calculated value is 8388608 -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
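The reported value 8388608 is exactly 2^23, which is consistent with single-precision rounding: function-query arithmetic at millisecond-epoch magnitudes does not fit in a 24-bit float mantissa. A small stand-alone Java demo (illustrative only, not Solr code; the constants are copied from the fl expression above, and the exact error in the report depends on the full expression and actual field values) shows how large the error gets at this scale:

{code}
public class FloatPrecisionDemo {
    public static void main(String[] args) {
        long a = 1416906516710L; // constants taken from the fl expression above
        long b = 141678360L;

        long exact = Math.abs(a - b);                   // exact 64-bit arithmetic
        float approx = Math.abs((float) a - (float) b); // what float-based evaluation computes

        System.out.println("exact  = " + exact);
        System.out.println("approx = " + (long) approx);
        System.out.println("error  = " + (exact - (long) approx));
        System.out.println("float spacing near a = " + Math.ulp((float) a)); // about 1.3e5 at this magnitude
    }
}
{code}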
[jira] [Commented] (SOLR-6511) Fencepost error in LeaderInitiatedRecoveryThread
[ https://issues.apache.org/jira/browse/SOLR-6511?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156445#comment-14156445 ] Shalin Shekhar Mangar commented on SOLR-6511: - Tim, I've committed SOLR-6530 on trunk. I'll merge it to branch_5x after you merge these changes. > Fencepost error in LeaderInitiatedRecoveryThread > > > Key: SOLR-6511 > URL: https://issues.apache.org/jira/browse/SOLR-6511 > Project: Solr > Issue Type: Bug >Reporter: Alan Woodward >Assignee: Timothy Potter > Attachments: SOLR-6511.patch, SOLR-6511.patch > > > At line 106: > {code} > while (continueTrying && ++tries < maxTries) { > {code} > should be > {code} > while (continueTrying && ++tries <= maxTries) { > {code} > This is only a problem when called from DistributedUpdateProcessor, as it can > have maxTries set to 1, which means the loop is never actually run. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
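For readers skimming the patch, here is a tiny self-contained sketch of the fencepost (not the actual LeaderInitiatedRecoveryThread code): with maxTries = 1 the strict comparison skips the loop body entirely, while the inclusive form runs it once as intended.

{code}
public class FencepostDemo {
    static int iterations(int maxTries, boolean inclusive) {
        int tries = 0, iterations = 0;
        boolean continueTrying = true;
        // Mirrors the shape of the loop quoted above; the body just counts passes
        // and pretends the first attempt succeeds.
        while (continueTrying && (inclusive ? ++tries <= maxTries : ++tries < maxTries)) {
            iterations++;
            continueTrying = false;
        }
        return iterations;
    }

    public static void main(String[] args) {
        System.out.println("'<'  with maxTries=1: " + iterations(1, false)); // 0, loop never runs
        System.out.println("'<=' with maxTries=1: " + iterations(1, true));  // 1, intended behaviour
    }
}
{code}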
[jira] [Commented] (LUCENE-5879) Add auto-prefix terms to block tree terms dict
[ https://issues.apache.org/jira/browse/LUCENE-5879?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156389#comment-14156389 ] Robert Muir commented on LUCENE-5879: - {quote} We need this stub class because of the API the terms dict uses when asking the postings format to write one term's postings: we pass TermsEnum to PostingsWriterBase.writeTerm. This is e.g. for PF's that may want to iterate docs/positions multiple times when writing one term ... {quote} I think that's the root of the problem causing my confusion? I guess TermsEnum is ok here, but it's much more than "docsEnum that you can rewind" (and not obvious for that!). I think that's why I freaked out when I saw FixedBitSetTermsEnum passing null and only implementing one method, it just didn't make a lot of sense. > Add auto-prefix terms to block tree terms dict > -- > > Key: LUCENE-5879 > URL: https://issues.apache.org/jira/browse/LUCENE-5879 > Project: Lucene - Core > Issue Type: New Feature > Components: core/codecs >Reporter: Michael McCandless >Assignee: Michael McCandless > Fix For: 5.0, Trunk > > Attachments: LUCENE-5879.patch, LUCENE-5879.patch, LUCENE-5879.patch, > LUCENE-5879.patch, LUCENE-5879.patch, LUCENE-5879.patch, LUCENE-5879.patch, > LUCENE-5879.patch, LUCENE-5879.patch > > > This cool idea to generalize numeric/trie fields came from Adrien: > Today, when we index a numeric field (LongField, etc.) we pre-compute > (via NumericTokenStream) outside of indexer/codec which prefix terms > should be indexed. > But this can be inefficient: you set a static precisionStep, and > always add those prefix terms regardless of how the terms in the field > are actually distributed. Yet typically in real world applications > the terms have a non-random distribution. > So, it should be better if instead the terms dict decides where it > makes sense to insert prefix terms, based on how dense the terms are > in each region of term space. > This way we can speed up query time for both term (e.g. infix > suggester) and numeric ranges, and it should let us use less index > space and get faster range queries. > > This would also mean that min/maxTerm for a numeric field would now be > correct, vs today where the externally computed prefix terms are > placed after the full precision terms, causing hairy code like > NumericUtils.getMaxInt/Long. So optos like LUCENE-5860 become > feasible. > The terms dict can also do tricks not possible if you must live on top > of its APIs, e.g. to handle the adversary/over-constrained case when a > given prefix has too many terms following it but finer prefixes > have too few (what block tree calls "floor term blocks"). -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-6530) Commits under network partition can put any node in down state
[ https://issues.apache.org/jira/browse/SOLR-6530?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156388#comment-14156388 ] ASF subversion and git services commented on SOLR-6530: --- Commit 1628945 from sha...@apache.org in branch 'dev/trunk' [ https://svn.apache.org/r1628945 ] SOLR-6530: Commits under network partitions can put any node in down state > Commits under network partition can put any node in down state > -- > > Key: SOLR-6530 > URL: https://issues.apache.org/jira/browse/SOLR-6530 > Project: Solr > Issue Type: Bug > Components: SolrCloud >Reporter: Shalin Shekhar Mangar >Priority: Critical > Fix For: 5.0, Trunk > > Attachments: SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, > SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, > SOLR-6530.patch > > > Commits are executed by any node in SolrCloud i.e. they're not routed via the > leader like other updates. > # Suppose there's 1 collection, 1 shard, 2 replicas (A and B) and A is the > leader > # Suppose a commit request is made to node B during a time where B cannot > talk to A due to a partition for any reason (failing switch, heavy GC, > whatever) > # B fails to distribute the commit to A (times out) and asks A to recover > # This was okay earlier because a leader just ignores recovery requests but > with leader initiated recovery code, B puts A in the "down" state and A can > never get out of that state. > tl;dr; During network partitions, if enough commit/optimize requests are sent > to the cluster, all the nodes in the cluster will eventually be marked as > "down". -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
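To make step 2 of the scenario concrete, here is a minimal, hypothetical SolrJ sketch (the URL and class names are illustrative placeholders, not from the issue): an explicit commit can be sent directly to any node, so replica B can receive and distribute it even while it cannot reach leader A.

{code}
import java.io.IOException;

import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrServer;

public class CommitToAnyNodeDemo {
    public static void main(String[] args) throws SolrServerException, IOException {
        // Point the client straight at replica B instead of routing via the leader;
        // the URL is a made-up example.
        HttpSolrServer nodeB = new HttpSolrServer("http://node-b:8983/solr/collection1");
        try {
            // Unlike ordinary updates, this commit is handled (and fanned out)
            // by whichever node receives it, here node B.
            nodeB.commit();
        } finally {
            nodeB.shutdown();
        }
    }
}
{code}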
[jira] [Updated] (SOLR-6530) Commits under network partition can put any node in down state
[ https://issues.apache.org/jira/browse/SOLR-6530?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Shalin Shekhar Mangar updated SOLR-6530: Summary: Commits under network partition can put any node in down state (was: Commits under network partition can put any node in down state by any node) > Commits under network partition can put any node in down state > -- > > Key: SOLR-6530 > URL: https://issues.apache.org/jira/browse/SOLR-6530 > Project: Solr > Issue Type: Bug > Components: SolrCloud >Reporter: Shalin Shekhar Mangar >Priority: Critical > Fix For: 5.0, Trunk > > Attachments: SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, > SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, > SOLR-6530.patch > > > Commits are executed by any node in SolrCloud i.e. they're not routed via the > leader like other updates. > # Suppose there's 1 collection, 1 shard, 2 replicas (A and B) and A is the > leader > # Suppose a commit request is made to node B during a time where B cannot > talk to A due to a partition for any reason (failing switch, heavy GC, > whatever) > # B fails to distribute the commit to A (times out) and asks A to recover > # This was okay earlier because a leader just ignores recovery requests but > with leader initiated recovery code, B puts A in the "down" state and A can > never get out of that state. > tl;dr; During network partitions, if enough commit/optimize requests are sent > to the cluster, all the nodes in the cluster will eventually be marked as > "down". -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-6266) Couchbase plug-in for Solr
[ https://issues.apache.org/jira/browse/SOLR-6266?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156376#comment-14156376 ] Karol Abramczyk commented on SOLR-6266: --- commons-io-2.4 is required by couchbase-capi-server project used in this plugin > Couchbase plug-in for Solr > -- > > Key: SOLR-6266 > URL: https://issues.apache.org/jira/browse/SOLR-6266 > Project: Solr > Issue Type: New Feature >Reporter: Varun >Assignee: Joel Bernstein > Attachments: solr-couchbase-plugin-0.0.3-SNAPSHOT.tar.gz, > solr-couchbase-plugin.tar.gz, solr-couchbase-plugin.tar.gz > > > It would be great if users could connect Couchbase and Solr so that updates > to Couchbase can automatically flow to Solr. Couchbase provides some very > nice API's which allow applications to mimic the behavior of a Couchbase > server so that it can receive updates via Couchbase's normal cross data > center replication (XDCR). > One possible design for this is to create a CouchbaseLoader that extends > ContentStreamLoader. This new loader would embed the couchbase api's that > listen for incoming updates from couchbase, then marshal the couchbase > updates into the normal Solr update process. > Instead of marshaling couchbase updates into the normal Solr update process, > we could also embed a SolrJ client to relay the request through the http > interfaces. This may be necessary if we have to handle mapping couchbase > "buckets" to Solr collections on the Solr side. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-6530) Commits under network partition can put any node in down state by any node
[ https://issues.apache.org/jira/browse/SOLR-6530?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Shalin Shekhar Mangar updated SOLR-6530: Attachment: SOLR-6530.patch Here's a better patch which removes the redundant isLeader check and also logs if the error'd node is not in the replica list of the current replica. All tests passed. This is ready. > Commits under network partition can put any node in down state by any node > -- > > Key: SOLR-6530 > URL: https://issues.apache.org/jira/browse/SOLR-6530 > Project: Solr > Issue Type: Bug > Components: SolrCloud >Reporter: Shalin Shekhar Mangar >Priority: Critical > Fix For: 5.0, Trunk > > Attachments: SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, > SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, > SOLR-6530.patch > > > Commits are executed by any node in SolrCloud i.e. they're not routed via the > leader like other updates. > # Suppose there's 1 collection, 1 shard, 2 replicas (A and B) and A is the > leader > # Suppose a commit request is made to node B during a time where B cannot > talk to A due to a partition for any reason (failing switch, heavy GC, > whatever) > # B fails to distribute the commit to A (times out) and asks A to recover > # This was okay earlier because a leader just ignores recovery requests but > with leader initiated recovery code, B puts A in the "down" state and A can > never get out of that state. > tl;dr; During network partitions, if enough commit/optimize requests are sent > to the cluster, all the nodes in the cluster will eventually be marked as > "down". -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Created] (SOLR-6578) Update commons-io dependency to the latest 2.4 version
Karol Abramczyk created SOLR-6578: - Summary: Update commons-io dependency to the latest 2.4 version Key: SOLR-6578 URL: https://issues.apache.org/jira/browse/SOLR-6578 Project: Solr Issue Type: Improvement Reporter: Karol Abramczyk Priority: Minor Latest commons-io version is 2.4, but Solr 4.10 still uses 2.3. It is also required by the Couchbase plugin for Solr (SOLR-6266) that Solr uses commons-io-2.4. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
Re: [JENKINS] Lucene-Solr-trunk-Linux (32bit/jdk1.8.0_40-ea-b04) - Build # 11368 - Still Failing!
This was introduced by SOLR-6564. I committed a fix. On Thu, Oct 2, 2014 at 1:48 PM, Policeman Jenkins Server < jenk...@thetaphi.de> wrote: > Build: http://jenkins.thetaphi.de/job/Lucene-Solr-trunk-Linux/11368/ > Java: 32bit/jdk1.8.0_40-ea-b04 -server -XX:+UseParallelGC > > 1 tests failed. > FAILED: org.apache.solr.SolrInfoMBeanTest.testCallMBeanInfo > > Error Message: > > > Stack Trace: > java.lang.AssertionError > at > __randomizedtesting.SeedInfo.seed([1F4ADE1311BCBE8D:E02C532F7AC4C393]:0) > at org.junit.Assert.fail(Assert.java:92) > at org.junit.Assert.assertTrue(Assert.java:43) > at org.junit.Assert.assertNotNull(Assert.java:526) > at org.junit.Assert.assertNotNull(Assert.java:537) > at > org.apache.solr.SolrInfoMBeanTest.testCallMBeanInfo(SolrInfoMBeanTest.java:66) > at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) > at > sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) > at > sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) > at java.lang.reflect.Method.invoke(Method.java:483) > at > com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618) > at > com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827) > at > com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) > at > com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877) > at > com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) > at > org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50) > at > org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) > at > com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) > at > org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49) > at > org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) > at > org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) > at > com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) > at > com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) > at > com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798) > at > com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458) > at > com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836) > at > com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738) > at > com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772) > at > com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783) > at > com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) > at > com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) > at > org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) > at > org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42) > at > 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) > at > com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) > at > com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) > at > com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) > at > com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) > at > com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) > at > org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:43) > at > org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) > at > org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) > at > org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:55) > at > com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(Stateme
[jira] [Commented] (SOLR-6564) Fix failing ExitableDirectoryReader tests for Solr
[ https://issues.apache.org/jira/browse/SOLR-6564?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156309#comment-14156309 ] ASF subversion and git services commented on SOLR-6564: --- Commit 1628928 from sha...@apache.org in branch 'dev/branches/branch_5x' [ https://svn.apache.org/r1628928 ] SOLR-6564: Fix failing SolrInfoMBeanTest > Fix failing ExitableDirectoryReader tests for Solr > -- > > Key: SOLR-6564 > URL: https://issues.apache.org/jira/browse/SOLR-6564 > Project: Solr > Issue Type: Test > Components: Tests >Reporter: Anshum Gupta >Assignee: Anshum Gupta > > ExitableDirectoryReader tests are failing as they enumerate over the terms in > less than 1ms (min timeAllowed value that case be set). Need to fix this. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-6564) Fix failing ExitableDirectoryReader tests for Solr
[ https://issues.apache.org/jira/browse/SOLR-6564?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156308#comment-14156308 ] ASF subversion and git services commented on SOLR-6564: --- Commit 1628927 from sha...@apache.org in branch 'dev/trunk' [ https://svn.apache.org/r1628927 ] SOLR-6564: Fix failing SolrInfoMBeanTest > Fix failing ExitableDirectoryReader tests for Solr > -- > > Key: SOLR-6564 > URL: https://issues.apache.org/jira/browse/SOLR-6564 > Project: Solr > Issue Type: Test > Components: Tests >Reporter: Anshum Gupta >Assignee: Anshum Gupta > > ExitableDirectoryReader tests are failing as they enumerate over the terms in > less than 1ms (min timeAllowed value that case be set). Need to fix this. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Resolved] (LUCENE-5984) Remove ChainedFilter
[ https://issues.apache.org/jira/browse/LUCENE-5984?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Adrien Grand resolved LUCENE-5984. -- Resolution: Fixed > Remove ChainedFilter > > > Key: LUCENE-5984 > URL: https://issues.apache.org/jira/browse/LUCENE-5984 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Adrien Grand >Priority: Minor > Attachments: LUCENE-5984.patch > > > I would like to suggest removing ChainedFilter. It is currently only used in > Solr's CurrencyField but could easily be replaced with a BooleanFilter and my > understanding of this filter is that it can generally be replaced with a > BooleanFilter. So let's drop it and suggest using BooleanFilter instead? -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
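For code that still uses ChainedFilter, a rough migration sketch assuming the 4.x/5.x queries-module APIs (ChainedFilter.AND mapped to required clauses); this illustrates the suggestion above and is not part of the committed patch:

{code}
import org.apache.lucene.queries.BooleanFilter;
import org.apache.lucene.queries.FilterClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.Filter;

public final class ChainedFilterMigration {
    // Before (removed): new ChainedFilter(new Filter[] {a, b}, ChainedFilter.AND)
    // After: a BooleanFilter with both clauses required.
    public static Filter and(Filter a, Filter b) {
        BooleanFilter bf = new BooleanFilter();
        bf.add(new FilterClause(a, Occur.MUST));
        bf.add(new FilterClause(b, Occur.MUST));
        return bf;
    }
}
{code}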
[jira] [Commented] (LUCENE-5984) Remove ChainedFilter
[ https://issues.apache.org/jira/browse/LUCENE-5984?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156300#comment-14156300 ] ASF subversion and git services commented on LUCENE-5984: - Commit 1628925 from [~jpountz] in branch 'dev/branches/branch_5x' [ https://svn.apache.org/r1628925 ] LUCENE-5984: Remove ChainedFilter. > Remove ChainedFilter > > > Key: LUCENE-5984 > URL: https://issues.apache.org/jira/browse/LUCENE-5984 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Adrien Grand >Priority: Minor > Attachments: LUCENE-5984.patch > > > I would like to suggest removing ChainedFilter. It is currently only used in > Solr's CurrencyField but could easily be replaced with a BooleanFilter and my > understanding of this filter is that it can generally be replaced with a > BooleanFilter. So let's drop it and suggest using BooleanFilter instead? -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-5979) Use the cost API instead of a heuristic on the first document in FilteredQuery to decide on whether to use random access
[ https://issues.apache.org/jira/browse/LUCENE-5979?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156301#comment-14156301 ] Adrien Grand commented on LUCENE-5979: -- If there are no concerns anymore, I will commit this patch soon. > Use the cost API instead of a heuristic on the first document in > FilteredQuery to decide on whether to use random access > > > Key: LUCENE-5979 > URL: https://issues.apache.org/jira/browse/LUCENE-5979 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Adrien Grand >Assignee: Adrien Grand >Priority: Minor > Fix For: 5.0 > > Attachments: LUCENE-5979.patch > > > Now that some major filters such as TermsFilter and > MultiTermQueryWrapperFilter return DocIdSets that have a better cost, we > should switch to the cost API. -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
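As a hedged sketch of the idea only (the real change is in the attached LUCENE-5979 patch), the decision could compare cost() estimates rather than inspecting the first matched document; the threshold below is an arbitrary placeholder:

{code}
import java.io.IOException;

import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.Bits;

final class RandomAccessHeuristic {
    // Decide between checking the filter per scored document (random access via
    // Bits) and leap-frogging the two iterators, using cost() estimates.
    static boolean useRandomAccess(DocIdSet filterSet, long scorerCost) throws IOException {
        Bits bits = filterSet.bits();                  // null if random access is unsupported
        DocIdSetIterator disi = filterSet.iterator();
        if (bits == null || disi == null) {
            return false;                              // fall back to iterator-based filtering
        }
        // A filter that matches many more documents than the scorer will visit is
        // cheaper to consult per scored document than to advance in lock-step.
        return disi.cost() > 10L * scorerCost;         // factor of 10 is illustrative only
    }
}
{code}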
[jira] [Updated] (SOLR-6530) Commits under network partition can put any node in down state by any node
[ https://issues.apache.org/jira/browse/SOLR-6530?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Shalin Shekhar Mangar updated SOLR-6530: Attachment: SOLR-6530.patch The last patch's test had a bug. It wasn't using the right proxies map. This is fixed now. > Commits under network partition can put any node in down state by any node > -- > > Key: SOLR-6530 > URL: https://issues.apache.org/jira/browse/SOLR-6530 > Project: Solr > Issue Type: Bug > Components: SolrCloud >Reporter: Shalin Shekhar Mangar >Priority: Critical > Fix For: 5.0, Trunk > > Attachments: SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, > SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch, SOLR-6530.patch > > > Commits are executed by any node in SolrCloud i.e. they're not routed via the > leader like other updates. > # Suppose there's 1 collection, 1 shard, 2 replicas (A and B) and A is the > leader > # Suppose a commit request is made to node B during a time where B cannot > talk to A due to a partition for any reason (failing switch, heavy GC, > whatever) > # B fails to distribute the commit to A (times out) and asks A to recover > # This was okay earlier because a leader just ignores recovery requests but > with leader initiated recovery code, B puts A in the "down" state and A can > never get out of that state. > tl;dr; During network partitions, if enough commit/optimize requests are sent > to the cluster, all the nodes in the cluster will eventually be marked as > "down". -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-5984) Remove ChainedFilter
[ https://issues.apache.org/jira/browse/LUCENE-5984?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156296#comment-14156296 ] ASF subversion and git services commented on LUCENE-5984: - Commit 1628923 from [~jpountz] in branch 'dev/trunk' [ https://svn.apache.org/r1628923 ] LUCENE-5984: Remove ChainedFilter. > Remove ChainedFilter > > > Key: LUCENE-5984 > URL: https://issues.apache.org/jira/browse/LUCENE-5984 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Adrien Grand >Priority: Minor > Attachments: LUCENE-5984.patch > > > I would like to suggest removing ChainedFilter. It is currently only used in > Solr's CurrencyField but could easily be replaced with a BooleanFilter and my > understanding of this filter is that it can generally be replaced with a > BooleanFilter. So let's drop it and suggest using BooleanFilter instead? -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-5.x-Linux (64bit/jdk1.7.0_67) - Build # 11216 - Still Failing!
Build: http://jenkins.thetaphi.de/job/Lucene-Solr-5.x-Linux/11216/ Java: 64bit/jdk1.7.0_67 -XX:-UseCompressedOops -XX:+UseConcMarkSweepGC 1 tests failed. FAILED: org.apache.solr.SolrInfoMBeanTest.testCallMBeanInfo Error Message: Stack Trace: java.lang.AssertionError at __randomizedtesting.SeedInfo.seed([9FBC351B3103DA80:60DAB8275A7BA79E]:0) at org.junit.Assert.fail(Assert.java:92) at org.junit.Assert.assertTrue(Assert.java:43) at org.junit.Assert.assertNotNull(Assert.java:526) at org.junit.Assert.assertNotNull(Assert.java:537) at org.apache.solr.SolrInfoMBeanTest.testCallMBeanInfo(SolrInfoMBeanTest.java:66) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836) at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738) at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:43) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:55) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at java.lang.Thread.run(Thread.java:745) Build Log: [...truncated 11019 lines...] [junit4] Suite: org.apache.solr.SolrInfoMBeanT
[JENKINS] Lucene-Solr-5.x-MacOSX (64bit/jdk1.8.0) - Build # 1823 - Still Failing!
Build: http://jenkins.thetaphi.de/job/Lucene-Solr-5.x-MacOSX/1823/ Java: 64bit/jdk1.8.0 -XX:-UseCompressedOops -XX:+UseParallelGC 1 tests failed. REGRESSION: org.apache.solr.SolrInfoMBeanTest.testCallMBeanInfo Error Message: Stack Trace: java.lang.AssertionError at __randomizedtesting.SeedInfo.seed([8C7ABB04EBD75218:731C363880AF2F06]:0) at org.junit.Assert.fail(Assert.java:92) at org.junit.Assert.assertTrue(Assert.java:43) at org.junit.Assert.assertNotNull(Assert.java:526) at org.junit.Assert.assertNotNull(Assert.java:537) at org.apache.solr.SolrInfoMBeanTest.testCallMBeanInfo(SolrInfoMBeanTest.java:66) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:483) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836) at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738) at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:43) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:55) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at java.lang.Thread.run(Thread.java:745) Build Log: [...truncated 11443 lines...] [junit4] Suite: org.apache.solr.SolrInfoMBeanTest
[JENKINS] Lucene-Solr-SmokeRelease-trunk - Build # 207 - Failure
Build: https://builds.apache.org/job/Lucene-Solr-SmokeRelease-trunk/207/ No tests ran. Build Log: [...truncated 50886 lines...] prepare-release-no-sign: [mkdir] Created dir: /usr/home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-trunk/lucene/build/smokeTestRelease/dist [copy] Copying 446 files to /usr/home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-trunk/lucene/build/smokeTestRelease/dist/lucene [copy] Copying 245 files to /usr/home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-trunk/lucene/build/smokeTestRelease/dist/solr [smoker] Java 1.7 JAVA_HOME=/home/jenkins/tools/java/latest1.7 [smoker] NOTE: output encoding is US-ASCII [smoker] [smoker] Load release URL "file:/usr/home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-trunk/lucene/build/smokeTestRelease/dist/"... [smoker] [smoker] Test Lucene... [smoker] test basics... [smoker] get KEYS [smoker] 0.1 MB in 0.01 sec (14.4 MB/sec) [smoker] check changes HTML... [smoker] download lucene-6.0.0-src.tgz... [smoker] 27.6 MB in 0.04 sec (663.1 MB/sec) [smoker] verify md5/sha1 digests [smoker] download lucene-6.0.0.tgz... [smoker] 61.0 MB in 0.16 sec (374.7 MB/sec) [smoker] verify md5/sha1 digests [smoker] download lucene-6.0.0.zip... [smoker] 70.5 MB in 0.10 sec (704.9 MB/sec) [smoker] verify md5/sha1 digests [smoker] unpack lucene-6.0.0.tgz... [smoker] verify JAR metadata/identity/no javax.* or java.* classes... [smoker] test demo with 1.7... [smoker] got 5573 hits for query "lucene" [smoker] checkindex with 1.7... [smoker] check Lucene's javadoc JAR [smoker] unpack lucene-6.0.0.zip... [smoker] verify JAR metadata/identity/no javax.* or java.* classes... [smoker] test demo with 1.7... [smoker] got 5573 hits for query "lucene" [smoker] checkindex with 1.7... [smoker] check Lucene's javadoc JAR [smoker] unpack lucene-6.0.0-src.tgz... [smoker] make sure no JARs/WARs in src dist... [smoker] run "ant validate" [smoker] run tests w/ Java 7 and testArgs='-Dtests.jettyConnector=Socket -Dtests.disableHdfs=true -Dtests.multiplier=1 -Dtests.slow=false'... [smoker] test demo with 1.7... [smoker] got 217 hits for query "lucene" [smoker] checkindex with 1.7... [smoker] generate javadocs w/ Java 7... [smoker] [smoker] Crawl/parse... [smoker] [smoker] Verify... [smoker] confirm all releases have coverage in TestBackwardsCompatibility [smoker] find all past Lucene releases... [smoker] run TestBackwardsCompatibility.. [smoker] success! [smoker] [smoker] Test Solr... [smoker] test basics... [smoker] get KEYS [smoker] 0.1 MB in 0.01 sec (9.3 MB/sec) [smoker] check changes HTML... [smoker] download solr-6.0.0-src.tgz... [smoker] 33.8 MB in 0.08 sec (400.5 MB/sec) [smoker] verify md5/sha1 digests [smoker] download solr-6.0.0.tgz... [smoker] 115.8 MB in 0.37 sec (313.1 MB/sec) [smoker] verify md5/sha1 digests [smoker] download solr-6.0.0.zip... [smoker] 121.9 MB in 0.23 sec (528.4 MB/sec) [smoker] verify md5/sha1 digests [smoker] unpack solr-6.0.0.tgz... [smoker] verify JAR metadata/identity/no javax.* or java.* classes... [smoker] unpack lucene-6.0.0.tgz... 
[smoker] **WARNING**: skipping check of /usr/home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-trunk/lucene/build/smokeTestRelease/tmp/unpack/solr-6.0.0/contrib/dataimporthandler-extras/lib/javax.mail-1.5.1.jar: it has javax.* classes [smoker] **WARNING**: skipping check of /usr/home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-trunk/lucene/build/smokeTestRelease/tmp/unpack/solr-6.0.0/contrib/dataimporthandler-extras/lib/activation-1.1.1.jar: it has javax.* classes [smoker] verify WAR metadata/contained JAR identity/no javax.* or java.* classes... [smoker] unpack lucene-6.0.0.tgz... [smoker] copying unpacked distribution for Java 7 ... [smoker] test solr example w/ Java 7... [smoker] start Solr instance (log=/usr/home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-trunk/lucene/build/smokeTestRelease/tmp/unpack/solr-6.0.0-java7/solr-example.log)... [smoker] startup done [smoker] test utf8... [smoker] index example docs... [smoker] run query... [smoker] stop server (SIGINT)... [smoker] unpack solr-6.0.0.zip... [smoker] verify JAR metadata/identity/no javax.* or java.* classes... [smoker] unpack lucene-6.0.0.tgz... [smoker] **WARNING**: skipping check of /usr/home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-trunk/lucene/build/smokeTestRelease/tmp/unpack/solr-6.0.0/contrib/dataimporthandler-extras/lib/activati
[jira] [Commented] (LUCENE-5879) Add auto-prefix terms to block tree terms dict
[ https://issues.apache.org/jira/browse/LUCENE-5879?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156222#comment-14156222 ] Michael McCandless commented on LUCENE-5879: bq. Patch looks good except for FixedBitSetTermsEnum. What is this doing? Can we remove it? I think its bogus how it does 'super(null)', its superclass should not even allow such a thing. I agree it's abusing FilterTermsEnum ... I'll fix these FilterLeafReader.FilterXXX classes to barf if they get null. We need this stub class because of the API the terms dict uses when asking the postings format to write one term's postings: we pass TermsEnum to PostingsWriterBase.writeTerm. This is e.g. for PF's that may want to iterate docs/positions multiple times when writing one term ... I'll fix it to directly subclass TermsEnum and override all methods... > Add auto-prefix terms to block tree terms dict > -- > > Key: LUCENE-5879 > URL: https://issues.apache.org/jira/browse/LUCENE-5879 > Project: Lucene - Core > Issue Type: New Feature > Components: core/codecs >Reporter: Michael McCandless >Assignee: Michael McCandless > Fix For: 5.0, Trunk > > Attachments: LUCENE-5879.patch, LUCENE-5879.patch, LUCENE-5879.patch, > LUCENE-5879.patch, LUCENE-5879.patch, LUCENE-5879.patch, LUCENE-5879.patch, > LUCENE-5879.patch, LUCENE-5879.patch > > > This cool idea to generalize numeric/trie fields came from Adrien: > Today, when we index a numeric field (LongField, etc.) we pre-compute > (via NumericTokenStream) outside of indexer/codec which prefix terms > should be indexed. > But this can be inefficient: you set a static precisionStep, and > always add those prefix terms regardless of how the terms in the field > are actually distributed. Yet typically in real world applications > the terms have a non-random distribution. > So, it should be better if instead the terms dict decides where it > makes sense to insert prefix terms, based on how dense the terms are > in each region of term space. > This way we can speed up query time for both term (e.g. infix > suggester) and numeric ranges, and it should let us use less index > space and get faster range queries. > > This would also mean that min/maxTerm for a numeric field would now be > correct, vs today where the externally computed prefix terms are > placed after the full precision terms, causing hairy code like > NumericUtils.getMaxInt/Long. So optos like LUCENE-5860 become > feasible. > The terms dict can also do tricks not possible if you must live on top > of its APIs, e.g. to handle the adversary/over-constrained case when a > given prefix has too many terms following it but finer prefixes > have too few (what block tree calls "floor term blocks"). -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-trunk-Linux (32bit/jdk1.8.0_40-ea-b04) - Build # 11368 - Still Failing!
Build: http://jenkins.thetaphi.de/job/Lucene-Solr-trunk-Linux/11368/ Java: 32bit/jdk1.8.0_40-ea-b04 -server -XX:+UseParallelGC 1 tests failed. FAILED: org.apache.solr.SolrInfoMBeanTest.testCallMBeanInfo Error Message: Stack Trace: java.lang.AssertionError at __randomizedtesting.SeedInfo.seed([1F4ADE1311BCBE8D:E02C532F7AC4C393]:0) at org.junit.Assert.fail(Assert.java:92) at org.junit.Assert.assertTrue(Assert.java:43) at org.junit.Assert.assertNotNull(Assert.java:526) at org.junit.Assert.assertNotNull(Assert.java:537) at org.apache.solr.SolrInfoMBeanTest.testCallMBeanInfo(SolrInfoMBeanTest.java:66) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:483) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836) at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738) at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:43) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:55) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365) at java.lang.Thread.run(Thread.java:745) Build Log: [...truncated 11014 lines...] [junit4] Suite: org.apache.solr.SolrInfoMBeanTest [jun
[jira] [Commented] (LUCENE-5879) Add auto-prefix terms to block tree terms dict
[ https://issues.apache.org/jira/browse/LUCENE-5879?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14156212#comment-14156212 ] Michael McCandless commented on LUCENE-5879: Thanks for reviewing Rob. bq. How can we prevent this from happening? I think we shouldn't add the FI option at this time? We should only add it once we make it more generic so that all codec impls can easily support it? > Add auto-prefix terms to block tree terms dict > -- > > Key: LUCENE-5879 > URL: https://issues.apache.org/jira/browse/LUCENE-5879 > Project: Lucene - Core > Issue Type: New Feature > Components: core/codecs >Reporter: Michael McCandless >Assignee: Michael McCandless > Fix For: 5.0, Trunk > > Attachments: LUCENE-5879.patch, LUCENE-5879.patch, LUCENE-5879.patch, > LUCENE-5879.patch, LUCENE-5879.patch, LUCENE-5879.patch, LUCENE-5879.patch, > LUCENE-5879.patch, LUCENE-5879.patch > > > This cool idea to generalize numeric/trie fields came from Adrien: > Today, when we index a numeric field (LongField, etc.) we pre-compute > (via NumericTokenStream) outside of indexer/codec which prefix terms > should be indexed. > But this can be inefficient: you set a static precisionStep, and > always add those prefix terms regardless of how the terms in the field > are actually distributed. Yet typically in real world applications > the terms have a non-random distribution. > So, it should be better if instead the terms dict decides where it > makes sense to insert prefix terms, based on how dense the terms are > in each region of term space. > This way we can speed up query time for both term (e.g. infix > suggester) and numeric ranges, and it should let us use less index > space and get faster range queries. > > This would also mean that min/maxTerm for a numeric field would now be > correct, vs today where the externally computed prefix terms are > placed after the full precision terms, causing hairy code like > NumericUtils.getMaxInt/Long. So optos like LUCENE-5860 become > feasible. > The terms dict can also do tricks not possible if you must live on top > of its APIs, e.g. to handle the adversary/over-constrained case when a > given prefix has too many terms following it but finer prefixes > have too few (what block tree calls "floor term blocks"). -- This message was sent by Atlassian JIRA (v6.3.4#6332) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-Tests-5.x-Java7 - Build # 2147 - Still Failing
Build: https://builds.apache.org/job/Lucene-Solr-Tests-5.x-Java7/2147/

1 tests failed.
REGRESSION: org.apache.solr.SolrInfoMBeanTest.testCallMBeanInfo

Error Message:

Stack Trace:
java.lang.AssertionError
    at __randomizedtesting.SeedInfo.seed([14B5983D1FB1352B:EBD3150174C94835]:0)
    at org.junit.Assert.fail(Assert.java:92)
    at org.junit.Assert.assertTrue(Assert.java:43)
    at org.junit.Assert.assertNotNull(Assert.java:526)
    at org.junit.Assert.assertNotNull(Assert.java:537)
    at org.apache.solr.SolrInfoMBeanTest.testCallMBeanInfo(SolrInfoMBeanTest.java:66)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53)
    at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55)
    at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46)
    at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:43)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65)
    at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:55)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365)
    at java.lang.Thread.run(Thread.java:745)

Build Log:
[...truncated 11376 lines...]
[junit4] Suite: org.apache.solr.SolrInfoMBeanTest
[junit4] 2> Creating dataDir: /usr/home/jenkins/jenki
[JENKINS] Lucene-Solr-trunk-MacOSX (64bit/jdk1.8.0) - Build # 1862 - Failure!
Build: http://jenkins.thetaphi.de/job/Lucene-Solr-trunk-MacOSX/1862/
Java: 64bit/jdk1.8.0 -XX:+UseCompressedOops -XX:+UseSerialGC

1 tests failed.
REGRESSION: org.apache.solr.SolrInfoMBeanTest.testCallMBeanInfo

Error Message:

Stack Trace:
java.lang.AssertionError
    at __randomizedtesting.SeedInfo.seed([439CAA0E7212DDB:FB5F479C8C5950C5]:0)
    at org.junit.Assert.fail(Assert.java:92)
    at org.junit.Assert.assertTrue(Assert.java:43)
    at org.junit.Assert.assertNotNull(Assert.java:526)
    at org.junit.Assert.assertNotNull(Assert.java:537)
    at org.apache.solr.SolrInfoMBeanTest.testCallMBeanInfo(SolrInfoMBeanTest.java:66)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:483)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1618)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:827)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:877)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53)
    at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55)
    at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:836)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:738)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:772)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:783)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:53)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46)
    at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule$1.evaluate(SystemPropertiesInvariantRule.java:55)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:43)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65)
    at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:55)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365)
    at java.lang.Thread.run(Thread.java:745)

Build Log:
[...truncated 11228 lines...]
[junit4] Suite: org.apache.solr.SolrInfoMBeanTest
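Note: the two SolrInfoMBeanTest reports above are the same failure signature: a message-less java.lang.AssertionError raised by an assertNotNull inside testCallMBeanInfo (SolrInfoMBeanTest.java:66), which means some SolrInfoMBean implementation handed back null from one of its info accessors. As orientation only, the sketch below shows the kind of per-bean check that stack trace implies; it is not the project's actual test code, and the choice of accessors (getName/getDescription/getCategory) and the helper name SolrInfoMBeanChecks are assumptions made for illustration.

    // Illustrative sketch only -- not the real SolrInfoMBeanTest. Assumes the
    // failing assertNotNull guards SolrInfoMBean metadata accessors; which
    // accessor is asserted at SolrInfoMBeanTest.java:66 is not visible in the trace.
    import static org.junit.Assert.assertNotNull;

    import org.apache.solr.core.SolrInfoMBean;

    public final class SolrInfoMBeanChecks {

      private SolrInfoMBeanChecks() {}

      // A message-less assertNotNull failure surfaces exactly like the reports above:
      // assertNotNull -> assertTrue -> fail, yielding "java.lang.AssertionError" with
      // an empty "Error Message:" line.
      public static void assertMBeanInfoComplete(SolrInfoMBean bean) {
        assertNotNull(bean.getName());         // short name used when the bean is registered
        assertNotNull(bean.getDescription());  // human-readable description
        assertNotNull(bean.getCategory());     // SolrInfoMBean.Category value
      }
    }

Because the check is factored into one helper, any SolrInfoMBean-typed object can be validated the same way. Failures like these are typically re-run locally with the master seed printed in the report (the value inside SeedInfo.seed([...])) via the build's tests.seed property, so the same randomized configuration is replayed.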