[JENKINS] Lucene-Solr-master-Linux (64bit/jdk-9.0.4) - Build # 21751 - Still Unstable!

2018-04-03 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/21751/
Java: 64bit/jdk-9.0.4 -XX:-UseCompressedOops -XX:+UseSerialGC

4 tests failed.
FAILED:  
org.apache.lucene.search.intervals.TestIntervalQuery.testOrderedNearQueryWidth3

Error Message:


Stack Trace:
java.lang.NullPointerException
at 
__randomizedtesting.SeedInfo.seed([A250BFD26322BA34:4F75683D8D02B035]:0)
at 
org.apache.lucene.search.intervals.IntervalFilter.docID(IntervalFilter.java:38)
at 
org.apache.lucene.search.intervals.IntervalScorer.docID(IntervalScorer.java:45)
at 
org.apache.lucene.search.AssertingScorer.(AssertingScorer.java:50)
at 
org.apache.lucene.search.AssertingScorer.wrap(AssertingScorer.java:33)
at 
org.apache.lucene.search.AssertingLeafCollector.setScorer(AssertingLeafCollector.java:45)
at 
org.apache.lucene.search.Weight$DefaultBulkScorer.score(Weight.java:182)
at org.apache.lucene.search.BulkScorer.score(BulkScorer.java:39)
at 
org.apache.lucene.search.AssertingBulkScorer.score(AssertingBulkScorer.java:71)
at org.apache.lucene.search.IndexSearcher.search(IndexSearcher.java:623)
at 
org.apache.lucene.search.AssertingIndexSearcher.search(AssertingIndexSearcher.java:72)
at org.apache.lucene.search.IndexSearcher.search(IndexSearcher.java:417)
at 
org.apache.lucene.search.QueryUtils.checkFirstSkipTo(QueryUtils.java:433)
at org.apache.lucene.search.QueryUtils.check(QueryUtils.java:119)
at org.apache.lucene.search.QueryUtils.check(QueryUtils.java:123)
at org.apache.lucene.search.CheckHits.checkHits(CheckHits.java:172)
at 
org.apache.lucene.search.intervals.TestIntervalQuery.checkHits(TestIntervalQuery.java:76)
at 
org.apache.lucene.search.intervals.TestIntervalQuery.testOrderedNearQueryWidth3(TestIntervalQuery.java:85)
at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:564)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at 
org.apache.lucene.u

[JENKINS] Lucene-Solr-BadApples-Tests-7.x - Build # 31 - Failure

2018-04-03 Thread Apache Jenkins Server
Build: https://builds.apache.org/job/Lucene-Solr-BadApples-Tests-7.x/31/

No tests ran.

Build Log:
[...truncated 1780 lines...]
   [junit4] JVM J1: stdout was not empty, see: 
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/lucene/build/core/test/temp/junit4-J1-20180404_034607_8616246385250962902160.sysout
   [junit4] >>> JVM J1 emitted unexpected output (verbatim) 
   [junit4] codec: FastDecompressionCompressingStoredFields, pf: FSTOrd50, dvf: 
Memory
   [junit4] <<< JVM J1: EOF 

[...truncated 10409 lines...]
   [junit4] Suite: org.apache.solr.cloud.autoscaling.ComputePlanActionTest
   [junit4]   2> 324117 INFO  
(SUITE-ComputePlanActionTest-seed#[AE5AB9149E50D967]-worker) [] 
o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: 
test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
   [junit4]   2> Creating dataDir: 
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.autoscaling.ComputePlanActionTest_AE5AB9149E50D967-001/init-core-data-001
   [junit4]   2> 324189 WARN  
(SUITE-ComputePlanActionTest-seed#[AE5AB9149E50D967]-worker) [] 
o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=2 numCloses=2
   [junit4]   2> 324245 INFO  
(SUITE-ComputePlanActionTest-seed#[AE5AB9149E50D967]-worker) [] 
o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true) 
w/NUMERIC_DOCVALUES_SYSPROP=false
   [junit4]   2> 324246 INFO  
(SUITE-ComputePlanActionTest-seed#[AE5AB9149E50D967]-worker) [] 
o.a.s.SolrTestCaseJ4 Randomized ssl (true) and clientAuth (false) via: 
@org.apache.solr.util.RandomizeSSL(reason=, ssl=NaN, value=NaN, clientAuth=NaN)
   [junit4]   2> 324247 INFO  
(SUITE-ComputePlanActionTest-seed#[AE5AB9149E50D967]-worker) [] 
o.a.s.c.MiniSolrCloudCluster Starting cluster of 1 servers in 
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.autoscaling.ComputePlanActionTest_AE5AB9149E50D967-001/tempDir-001
   [junit4]   2> 324248 INFO  
(SUITE-ComputePlanActionTest-seed#[AE5AB9149E50D967]-worker) [] 
o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
   [junit4]   2> 324248 INFO  (Thread-45) [] o.a.s.c.ZkTestServer client 
port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 324248 INFO  (Thread-45) [] o.a.s.c.ZkTestServer Starting 
server
   [junit4]   2> 324250 ERROR (Thread-45) [] o.a.z.s.ZooKeeperServer 
ZKShutdownHandler is not registered, so ZooKeeper server won't take any action 
on ERROR or SHUTDOWN server state changes
   [junit4]   2> 324348 INFO  
(SUITE-ComputePlanActionTest-seed#[AE5AB9149E50D967]-worker) [] 
o.a.s.c.ZkTestServer start zk server on port:41904
   [junit4]   2> 324351 INFO  (zkConnectionManagerCallback-85-thread-1) [] 
o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 324382 INFO  (jetty-launcher-82-thread-1) [] 
o.e.j.s.Server jetty-9.4.8.v20171121, build timestamp: 
2017-11-22T09:27:37+12:00, git hash: 82b8fb23f757335bb3329d540ce37a2a2615f0a8
   [junit4]   2> 324396 INFO  (jetty-launcher-82-thread-1) [] 
o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 324396 INFO  (jetty-launcher-82-thread-1) [] 
o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 324396 INFO  (jetty-launcher-82-thread-1) [] 
o.e.j.s.session Scavenging every 66ms
   [junit4]   2> 324396 INFO  (jetty-launcher-82-thread-1) [] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@487d739{/solr,null,AVAILABLE}
   [junit4]   2> 324462 INFO  (jetty-launcher-82-thread-1) [] 
o.e.j.s.AbstractConnector Started ServerConnector@86725e4{SSL,[ssl, 
http/1.1]}{127.0.0.1:33709}
   [junit4]   2> 324462 INFO  (jetty-launcher-82-thread-1) [] 
o.e.j.s.Server Started @324615ms
   [junit4]   2> 324462 INFO  (jetty-launcher-82-thread-1) [] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, 
hostPort=33709}
   [junit4]   2> 324463 ERROR (jetty-launcher-82-thread-1) [] 
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 324463 INFO  (jetty-launcher-82-thread-1) [] 
o.a.s.s.SolrDispatchFilter Using logger factory 
org.apache.logging.slf4j.Log4jLoggerFactory
   [junit4]   2> 324463 INFO  (jetty-launcher-82-thread-1) [] 
o.a.s.s.SolrDispatchFilter  ___  _   Welcome to Apache Solr™ version 
7.4.0
   [junit4]   2> 324463 INFO  (jetty-launcher-82-thread-1) [] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 324463 INFO  (jetty-launcher-82-thread-1) [] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 324463 INFO  (jetty-launcher-82-thread-1) [] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|Start time: 
2018-04-04T04:46:56.551Z
   [junit4]   2> 324482 INFO  (zkConnectionManagerCallback-87-thread-1) [] 
o.a.s.c.c.ConnectionManage

[JENKINS] Lucene-Solr-7.x-MacOSX (64bit/jdk1.8.0) - Build # 557 - Failure!

2018-04-03 Thread Policeman Jenkins Server
Error processing tokens: Error while parsing action 
'Text/ZeroOrMore/FirstOf/Token/DelimitedToken/DelimitedToken_Action3' at input 
position (line 79, pos 4):
)"}
   ^

java.lang.OutOfMemoryError: Java heap space

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org

[jira] [Commented] (SOLR-11913) SolrParams ought to implement Iterable>

2018-04-03 Thread Tapan Vaishnav (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-11913?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424994#comment-16424994
 ] 

Tapan Vaishnav commented on SOLR-11913:
---

[~dsmiley] Thanks for your reply.
I have updated a few getParameterNamesIterator() callers and fixed some bugs in 
the latest patch.
Please have a look and let me know your thoughts.

> SolrParams ought to implement Iterable>
> --
>
> Key: SOLR-11913
> URL: https://issues.apache.org/jira/browse/SOLR-11913
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>Reporter: David Smiley
>Priority: Minor
>  Labels: newdev
> Attachments: SOLR-11913.patch, SOLR-11913.patch, SOLR-11913.patch, 
> SOLR-11913_v2.patch
>
>
> SolrJ ought to implement {{Iterable>}} so that 
> it's easier to iterate on it, either using Java 5 for-each style, or Java 8 
> streams.  The implementation on ModifiableSolrParams can delegate through to 
> the underlying LinkedHashMap entry set.  The default impl can produce a 
> Map.Entry with a getValue that calls through to getParams.  



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-11913) SolrParams ought to implement Iterable>

2018-04-03 Thread Tapan Vaishnav (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-11913?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Tapan Vaishnav updated SOLR-11913:
--
Attachment: SOLR-11913.patch

> SolrParams ought to implement Iterable>
> --
>
> Key: SOLR-11913
> URL: https://issues.apache.org/jira/browse/SOLR-11913
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>Reporter: David Smiley
>Priority: Minor
>  Labels: newdev
> Attachments: SOLR-11913.patch, SOLR-11913.patch, SOLR-11913.patch, 
> SOLR-11913_v2.patch
>
>
> SolrJ ought to implement {{Iterable>}} so that 
> it's easier to iterate on it, either using Java 5 for-each style, or Java 8 
> streams.  The implementation on ModifiableSolrParams can delegate through to 
> the underlying LinkedHashMap entry set.  The default impl can produce a 
> Map.Entry with a getValue that calls through to getParams.  



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (LUCENE-8236) GeoPath behavior with identical points

2018-04-03 Thread Ignacio Vera (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-8236?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424992#comment-16424992
 ] 

Ignacio Vera commented on LUCENE-8236:
--

Attached a patch with the logic for filtering points.

> GeoPath behavior with identical points
> --
>
> Key: LUCENE-8236
> URL: https://issues.apache.org/jira/browse/LUCENE-8236
> Project: Lucene - Core
>  Issue Type: Bug
>  Components: modules/spatial3d
>Reporter: Ignacio Vera
>Priority: Minor
> Attachments: LUCENE-8326.patch
>
>
> GeoPath has the current behavior:
>  * When provided a path with two consecutive identical points: In all cases 
> it generates an {{IllegalArgumentException}} because it tries to build a 
> plane with those two points.
>  * When provided a path with two consecutive numerical identical points: In 
> case of {{GeoStandardPath}} it throws an {{IllegalArgumentException}} because 
> the path is too short. The case of {{GeoDegeneratePath}} is more complicated 
> as it builds the path but the plane can be bogus. In some cases points on the 
> other side of the world can be "In Set".
> I think the factory should filter out these points, in the same way it is 
> done for {{GeoPolygon}}. If this is not the desired behavior then the factory 
>  should throw a consistent {{IllegalArgumentException}} in all cases.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (LUCENE-8236) GeoPath behavior with identical points

2018-04-03 Thread Ignacio Vera (JIRA)

 [ 
https://issues.apache.org/jira/browse/LUCENE-8236?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Ignacio Vera updated LUCENE-8236:
-
Attachment: LUCENE-8326.patch

> GeoPath behavior with identical points
> --
>
> Key: LUCENE-8236
> URL: https://issues.apache.org/jira/browse/LUCENE-8236
> Project: Lucene - Core
>  Issue Type: Bug
>  Components: modules/spatial3d
>Reporter: Ignacio Vera
>Priority: Minor
> Attachments: LUCENE-8326.patch
>
>
> GeoPath has the current behavior:
>  * When provided a path with two consecutive identical points: In all cases 
> it generates an {{IllegalArgumentException}} because it tries to build a 
> plane with those two points.
>  * When provided a path with two consecutive numerical identical points: In 
> case of {{GeoStandardPath}} it throws an {{IllegalArgumentException}} because 
> the path is too short. The case of {{GeoDegeneratePath}} is more complicated 
> as it builds the path but the plane can be bogus. In some cases points on the 
> other side of the world can be "In Set".
> I think the factory should filter out these points, in the same way it is 
> done for {{GeoPolygon}}. If this is not the desired behavior then the factory 
>  should throw a consistent {{IllegalArgumentException}} in all cases.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (LUCENE-8236) GeoPath behavior with identical points

2018-04-03 Thread Ignacio Vera (JIRA)

 [ 
https://issues.apache.org/jira/browse/LUCENE-8236?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Ignacio Vera updated LUCENE-8236:
-
Summary: GeoPath behavior with identical points  (was: GeoPath behavior 
with indentical points)

> GeoPath behavior with identical points
> --
>
> Key: LUCENE-8236
> URL: https://issues.apache.org/jira/browse/LUCENE-8236
> Project: Lucene - Core
>  Issue Type: Bug
>  Components: modules/spatial3d
>Reporter: Ignacio Vera
>Priority: Minor
>
> GeoPath has the current behavior:
>  * When provided a path with two consecutive identical points: In all cases 
> it generates an {{IllegalArgumentException}} because it tries to build a 
> plane with those two points.
>  * When provided a path with two consecutive numerical identical points: In 
> case of {{GeoStandardPath}} it throws an {{IllegalArgumentException}} because 
> the path is too short. The case of {{GeoDegeneratePath}} is more complicated 
> as it builds the path but the plane can be bogus. In some cases points on the 
> other side of the world can be "In Set".
> I think the factory should filter out these points, in the same way it is 
> done for {{GeoPolygon}}. If this is not the desired behavior then the factory 
>  should throw a consistent {{IllegalArgumentException}} in all cases.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Created] (LUCENE-8236) GeoPath behavior with indentical points

2018-04-03 Thread Ignacio Vera (JIRA)
Ignacio Vera created LUCENE-8236:


 Summary: GeoPath behavior with indentical points
 Key: LUCENE-8236
 URL: https://issues.apache.org/jira/browse/LUCENE-8236
 Project: Lucene - Core
  Issue Type: Bug
  Components: modules/spatial3d
Reporter: Ignacio Vera


GeoPath has the current behavior:
 * When provided a path with two consecutive identical points: In all cases it 
generates an {{IllegalArgumentException}} because it tries to build a plane 
with those two points.
 * When provided a path with two consecutive numerical identical points: In 
case of {{GeoStandardPath}} it throws an {{IllegalArgumentException}} because 
the path is too short. The case of {{GeoDegeneratePath}} is more complicated as 
it builds the path but the plane can be bogus. In some cases points on the 
other side of the world can be "In Set".

I think the factory should filter out these points, in the same way it is done 
for {{GeoPolygon}}. If this is not the desired behavior then the factory  
should throw a consistent {{IllegalArgumentException}} in all cases.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Created] (LUCENE-8235) Can't change Single Valued field to Multi Valued even by deleting/readding

2018-04-03 Thread Cetra Free (JIRA)
Cetra Free created LUCENE-8235:
--

 Summary: Can't change Single Valued field to Multi Valued even by 
deleting/readding
 Key: LUCENE-8235
 URL: https://issues.apache.org/jira/browse/LUCENE-8235
 Project: Lucene - Core
  Issue Type: Bug
Reporter: Cetra Free


Basically from here: https://issues.apache.org/jira/browse/SOLR-12185



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-12185) Can't change Single Valued field to Multi Valued even by deleting/readding

2018-04-03 Thread Cetra Free (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12185?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424961#comment-16424961
 ] 

Cetra Free commented on SOLR-12185:
---

Should I raise this with Lucene then?

> Can't change Single Valued field to Multi Valued even by deleting/readding
> --
>
> Key: SOLR-12185
> URL: https://issues.apache.org/jira/browse/SOLR-12185
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: Schema and Analysis
>Affects Versions: 7.1
>Reporter: Cetra Free
>Priority: Major
>
> Changing a single-valued field to multi-valued field with doc values breaks 
> things.  This doesn't matter if you change the field or do a complete delete 
> and re-add of the field.  The only way I have found to "fix" this is to 
> delete the entire core from disk and re-add it.
> h2. Steps to replicate:
>  * Create a field, make it single valued with doc values
>  * Index a couple of docs
>  * Delete the field
>  * Add the field again with the same name, but change it to multiValued
>  * Try indexing a couple of docs
> h2. Expected result:
> The documents are indexed correctly and there are no issues
> h2. Actual outcome:
> The documents refuse to be indexed and you see this in the logs:
> {code:java}
> org.apache.solr.common.SolrException: Exception writing document id 
> 6a3226c8-c904-40d7-aecb-76c3515db7b8 to the index; possible analysis error: 
> cannot change DocValues type from SORTED to SORTED_SET for field 
> "example_field"
>     at 
> org.apache.solr.update.DirectUpdateHandler2.addDoc(DirectUpdateHandler2.java:221)
>     at 
> org.apache.solr.update.processor.RunUpdateProcessor.processAdd(RunUpdateProcessorFactory.java:67)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.DistributedUpdateProcessor.doLocalAdd(DistributedUpdateProcessor.java:991)
>     at 
> org.apache.solr.update.processor.DistributedUpdateProcessor.versionAdd(DistributedUpdateProcessor.java:1207)
>     at 
> org.apache.solr.update.processor.DistributedUpdateProcessor.processAdd(DistributedUpdateProcessor.java:753)
>     at 
> org.apache.solr.update.processor.LogUpdateProcessorFactory$LogUpdateProcessor.processAdd(LogUpdateProcessorFactory.java:103)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.AddSchemaFieldsUpdateProcessorFactory$AddSchemaFieldsUpdateProcessor.processAdd(AddSchemaFieldsUpdateProcessorFactory.java:474)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.FieldNameMutatingUpdateProcessorFactory$1.processAdd(FieldNameMutatingUpdateProcessorFactory.java:74)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.AbstractDefaultValueUpdateProcessorFactory$DefaultValueUpdateProcessor.processAdd(AbstractDefaultValueUpdateProcessorFactory.java:91)
>     at 
> org.apache.solr.handler.dataimport.SolrWriter.upload(SolrWriter.java:80)
>     at 
> org.apache.solr.handler.dataimport.DataImportHandler$1.upload(DataImportHandler.java:257)
>     at 
> org.apache.solr.handler.dataimport.DocBuilder.buildDocument(DocBuilder.java:527)
>     at 
> org.apache.sol

[jira] [Commented] (SOLR-12186) XPathEntityProcessor with useSolrAddSchema does not add nested child documents

2018-04-03 Thread Erick Erickson (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12186?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424951#comment-16424951
 ] 

Erick Erickson commented on SOLR-12186:
---

Could you submit a documentation patch that describes the behavior?

> XPathEntityProcessor with useSolrAddSchema does not add nested child documents
> --
>
> Key: SOLR-12186
> URL: https://issues.apache.org/jira/browse/SOLR-12186
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: contrib - DataImportHandler
>Affects Versions: 7.1
>Reporter: Cetra Free
>Priority: Major
>
> When using {{useSolrAddSchema=true}} this does not support child nested 
> documents as per the normal update handler.
> I would expect this to either be mentioned in the documentation as a 
> limitation, or supported.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Resolved] (SOLR-12185) Can't change Single Valued field to Multi Valued even by deleting/readding

2018-04-03 Thread Erick Erickson (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-12185?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Erick Erickson resolved SOLR-12185.
---
Resolution: Information Provided

> Can't change Single Valued field to Multi Valued even by deleting/readding
> --
>
> Key: SOLR-12185
> URL: https://issues.apache.org/jira/browse/SOLR-12185
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: Schema and Analysis
>Affects Versions: 7.1
>Reporter: Cetra Free
>Priority: Major
>
> Changing a single-valued field to multi-valued field with doc values breaks 
> things.  This doesn't matter if you change the field or do a complete delete 
> and re-add of the field.  The only way I have found to "fix" this is to 
> delete the entire core from disk and re-add it.
> h2. Steps to replicate:
>  * Create a field, make it single valued with doc values
>  * Index a couple of docs
>  * Delete the field
>  * Add the field again with the same name, but change it to multiValued
>  * Try indexing a couple of docs
> h2. Expected result:
> The documents are indexed correctly and there are no issues
> h2. Actual outcome:
> The documents refuse to be indexed and you see this in the logs:
> {code:java}
> org.apache.solr.common.SolrException: Exception writing document id 
> 6a3226c8-c904-40d7-aecb-76c3515db7b8 to the index; possible analysis error: 
> cannot change DocValues type from SORTED to SORTED_SET for field 
> "example_field"
>     at 
> org.apache.solr.update.DirectUpdateHandler2.addDoc(DirectUpdateHandler2.java:221)
>     at 
> org.apache.solr.update.processor.RunUpdateProcessor.processAdd(RunUpdateProcessorFactory.java:67)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.DistributedUpdateProcessor.doLocalAdd(DistributedUpdateProcessor.java:991)
>     at 
> org.apache.solr.update.processor.DistributedUpdateProcessor.versionAdd(DistributedUpdateProcessor.java:1207)
>     at 
> org.apache.solr.update.processor.DistributedUpdateProcessor.processAdd(DistributedUpdateProcessor.java:753)
>     at 
> org.apache.solr.update.processor.LogUpdateProcessorFactory$LogUpdateProcessor.processAdd(LogUpdateProcessorFactory.java:103)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.AddSchemaFieldsUpdateProcessorFactory$AddSchemaFieldsUpdateProcessor.processAdd(AddSchemaFieldsUpdateProcessorFactory.java:474)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.FieldNameMutatingUpdateProcessorFactory$1.processAdd(FieldNameMutatingUpdateProcessorFactory.java:74)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.AbstractDefaultValueUpdateProcessorFactory$DefaultValueUpdateProcessor.processAdd(AbstractDefaultValueUpdateProcessorFactory.java:91)
>     at 
> org.apache.solr.handler.dataimport.SolrWriter.upload(SolrWriter.java:80)
>     at 
> org.apache.solr.handler.dataimport.DataImportHandler$1.upload(DataImportHandler.java:257)
>     at 
> org.apache.solr.handler.dataimport.DocBuilder.buildDocument(DocBuilder.java:527)
>     at 
> org.apache.solr.handler.dataimport.DocBuilder.buildDocument(Doc

[JENKINS-MAVEN] Lucene-Solr-Maven-master #2225: POMs out of sync

2018-04-03 Thread Apache Jenkins Server
Build: https://builds.apache.org/job/Lucene-Solr-Maven-master/2225/

No tests ran.

Build Log:
[...truncated 31572 lines...]
  [mvn] [INFO] -
  [mvn] [INFO] -
  [mvn] [ERROR] COMPILATION ERROR : 
  [mvn] [INFO] -

[...truncated 204 lines...]
BUILD FAILED
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Maven-master/build.xml:679: 
The following error occurred while executing this line:
: Java returned: 1

Total time: 14 minutes 46 seconds
Build step 'Invoke Ant' marked build as failure
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org

[jira] [Commented] (SOLR-12176) Improve FORCELEADER to handle the case when a replica win the election but does not present in clusterstate

2018-04-03 Thread ASF subversion and git services (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12176?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424947#comment-16424947
 ] 

ASF subversion and git services commented on SOLR-12176:


Commit 34b83ed86993d71ba3bb9ae58a3df8ce4351a045 in lucene-solr's branch 
refs/heads/master from [~caomanhdat]
[ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=34b83ed ]

SOLR-12176: Improve FORCELEADER to handle the case when a replica win the 
election but does not present in clusterstate


> Improve FORCELEADER to handle the case when a replica win the election but 
> does not present in clusterstate
> ---
>
> Key: SOLR-12176
> URL: https://issues.apache.org/jira/browse/SOLR-12176
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>Reporter: Cao Manh Dat
>Assignee: Cao Manh Dat
>Priority: Major
> Attachments: SOLR-12176.patch
>
>
> There can be the case when a replica wins the election but it is not 
> present in the clusterstate. Maybe when the Overseer sent the UNLOAD request to 
> the LEADER (in DeleteReplicaCmd), it met some exception (therefore the 
> request never reached the LEADER); the Overseer in that case will forcefully 
> remove the LEADER from the clusterstate. 
> If a shard reaches that case, users will only see a leaderless shard and call 
> FORCELEADER won't be able to solve their problem. Therefore FORCELEADER 
> should be more robust, to handle such cases.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-12176) Improve FORCELEADER to handle the case when a replica win the election but does not present in clusterstate

2018-04-03 Thread ASF subversion and git services (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12176?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424948#comment-16424948
 ] 

ASF subversion and git services commented on SOLR-12176:


Commit 3c68f3d63769ec1e9c7400a0974837f051046a65 in lucene-solr's branch 
refs/heads/branch_7x from [~caomanhdat]
[ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=3c68f3d ]

SOLR-12176: Improve FORCELEADER to handle the case when a replica win the 
election but does not present in clusterstate


> Improve FORCELEADER to handle the case when a replica win the election but 
> does not present in clusterstate
> ---
>
> Key: SOLR-12176
> URL: https://issues.apache.org/jira/browse/SOLR-12176
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>Reporter: Cao Manh Dat
>Assignee: Cao Manh Dat
>Priority: Major
> Attachments: SOLR-12176.patch
>
>
> There can be the case when a replica wins the election but it is not 
> present in the clusterstate. Maybe when the Overseer sent the UNLOAD request to 
> the LEADER (in DeleteReplicaCmd), it met some exception (therefore the 
> request never reached the LEADER); the Overseer in that case will forcefully 
> remove the LEADER from the clusterstate. 
> If a shard reaches that case, users will only see a leaderless shard and call 
> FORCELEADER won't be able to solve their problem. Therefore FORCELEADER 
> should be more robust, to handle such cases.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



Re: Welcome Cao Mạnh Đạt to the PMC

2018-04-03 Thread Koji Sekiguchi

Welcome Đạt!

Koji

On 2018/04/03 4:50, Adrien Grand wrote:

Fixing the subject of the email.

Le lun. 2 avr. 2018 à 21:48, Adrien Grand <jpou...@gmail.com> a écrit :

I am pleased to announce that Cao Mạnh Đạt has accepted the PMC's 
invitation to join.

Welcome Đạt!



-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS] Lucene-Solr-Tests-7.3 - Build # 48 - Unstable

2018-04-03 Thread Apache Jenkins Server
Build: https://builds.apache.org/job/Lucene-Solr-Tests-7.3/48/

3 tests failed.
FAILED:  
org.apache.solr.cloud.autoscaling.sim.TestTriggerIntegration.testNodeAddedTriggerRestoreState

Error Message:
The trigger did not fire at all

Stack Trace:
java.lang.AssertionError: The trigger did not fire at all
at 
__randomizedtesting.SeedInfo.seed([CA6E4E49ADA1E82A:4253C73697610987]:0)
at org.junit.Assert.fail(Assert.java:93)
at org.junit.Assert.assertTrue(Assert.java:43)
at 
org.apache.solr.cloud.autoscaling.sim.TestTriggerIntegration.testNodeAddedTriggerRestoreState(TestTriggerIntegration.java:368)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.lang.Thread.run(Thread.java:748)


FAILED:  org.apache.solr.client.solrj.TestLBHttpSolrClient.testReliability

Error Message:
No live SolrServers available to handle this request

Stack Trace:
org.apache.solr.client.solrj.SolrServerException: No live SolrServers available 
to handle this request
at 
__randomizedtest

[jira] [Commented] (SOLR-12185) Can't change Single Valued field to Multi Valued even by deleting/readding

2018-04-03 Thread Shawn Heisey (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12185?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424936#comment-16424936
 ] 

Shawn Heisey commented on SOLR-12185:
-

Changing the multiValued setting on a field with existing data that includes 
docValues requires getting rid of the entire index and building it again.

This behavior comes from Lucene, not Solr.  Lucene saves certain information 
about the docValues into the index, and once it's there, it can't be changed.  
There's nothing we can do in Solr to fix this behavior.



> Can't change Single Valued field to Multi Valued even by deleting/readding
> --
>
> Key: SOLR-12185
> URL: https://issues.apache.org/jira/browse/SOLR-12185
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: Schema and Analysis
>Affects Versions: 7.1
>Reporter: Cetra Free
>Priority: Major
>
> Changing a single-valued field to multi-valued field with doc values breaks 
> things.  This doesn't matter if you change the field or do a complete delete 
> and re-add of the field.  The only way I have found to "fix" this is to 
> delete the entire core from disk and re-add it.
> h2. Steps to replicate:
>  * Create a field, make it single valued with doc values
>  * Index a couple of docs
>  * Delete the field
>  * Add the field again with the same name, but change it to multiValued
>  * Try indexing a couple of docs
> h2. Expected result:
> The documents are indexed correctly and there are no issues
> h2. Actual outcome:
> The documents refuse to be indexed and you see this in the logs:
> {code:java}
> org.apache.solr.common.SolrException: Exception writing document id 
> 6a3226c8-c904-40d7-aecb-76c3515db7b8 to the index; possible analysis error: 
> cannot change DocValues type from SORTED to SORTED_SET for field 
> "example_field"
>     at 
> org.apache.solr.update.DirectUpdateHandler2.addDoc(DirectUpdateHandler2.java:221)
>     at 
> org.apache.solr.update.processor.RunUpdateProcessor.processAdd(RunUpdateProcessorFactory.java:67)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.DistributedUpdateProcessor.doLocalAdd(DistributedUpdateProcessor.java:991)
>     at 
> org.apache.solr.update.processor.DistributedUpdateProcessor.versionAdd(DistributedUpdateProcessor.java:1207)
>     at 
> org.apache.solr.update.processor.DistributedUpdateProcessor.processAdd(DistributedUpdateProcessor.java:753)
>     at 
> org.apache.solr.update.processor.LogUpdateProcessorFactory$LogUpdateProcessor.processAdd(LogUpdateProcessorFactory.java:103)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.AddSchemaFieldsUpdateProcessorFactory$AddSchemaFieldsUpdateProcessor.processAdd(AddSchemaFieldsUpdateProcessorFactory.java:474)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.FieldNameMutatingUpdateProcessorFactory$1.processAdd(FieldNameMutatingUpdateProcessorFactory.java:74)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
>     at 
> org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
>     at 
> org.apache.solr.update.processor.AbstractDefaultValueUpdateProcessorFactory$DefaultValueUpdateProcessor.processAdd(AbstractDefaultValueUpdateProcessorFact

[jira] [Created] (SOLR-12187) Replica should watch clusterstate and unload itself if its entry is removed

2018-04-03 Thread Cao Manh Dat (JIRA)
Cao Manh Dat created SOLR-12187:
---

 Summary: Replica should watch clusterstate and unload itself if 
its entry is removed
 Key: SOLR-12187
 URL: https://issues.apache.org/jira/browse/SOLR-12187
 Project: Solr
  Issue Type: Bug
  Security Level: Public (Default Security Level. Issues are Public)
Reporter: Cao Manh Dat
Assignee: Cao Manh Dat


With the introduction of autoscaling framework, we have seen an increase in the 
number of issues related to the race condition between delete a replica and 
other stuff.

Case 1: DeleteReplicaCmd failed to send the UNLOAD request to a replica and 
therefore forcefully removed its entry from the clusterstate, but the replica 
still functions normally and is able to become a leader -> SOLR-12176
Case 2:
 * DeleteReplicaCmd enqueue a DELETECOREOP (without sending a request to 
replica because the node is not live)
 * The node start and the replica get loaded
 * DELETECOREOP has not been processed yet, hence the replica is still present in 
the clusterstate --> passes checkStateInZk
 * DELETECOREOP is executed, DeleteReplicaCmd finished
 ** result 1: the replica starts recovering, finishes it and publishes itself as 
ACTIVE --> state of the replica is ACTIVE
 ** result 2: the replica throws an exception (probably: NPE) 
--> state of the replica is DOWN, does not join leader election



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Resolved] (SOLR-12168) LIROnShardRestartTest failures

2018-04-03 Thread Cao Manh Dat (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-12168?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cao Manh Dat resolved SOLR-12168.
-
Resolution: Fixed

> LIROnShardRestartTest failures
> --
>
> Key: SOLR-12168
> URL: https://issues.apache.org/jira/browse/SOLR-12168
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>Reporter: Cao Manh Dat
>Assignee: Cao Manh Dat
>Priority: Minor
>




--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Created] (SOLR-12186) XPathEntityProcessor with useSolrAddSchema does not add nested child documents

2018-04-03 Thread Cetra Free (JIRA)
Cetra Free created SOLR-12186:
-

 Summary: XPathEntityProcessor with useSolrAddSchema does not add 
nested child documents
 Key: SOLR-12186
 URL: https://issues.apache.org/jira/browse/SOLR-12186
 Project: Solr
  Issue Type: Bug
  Security Level: Public (Default Security Level. Issues are Public)
  Components: contrib - DataImportHandler
Affects Versions: 7.1
Reporter: Cetra Free


When using {{useSolrAddSchema=true}} this does not support child nested 
documents as per the normal update handler.

I would expect this to either be mentioned in the documentation as a 
limitation, or supported.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Resolved] (SOLR-12066) Cleanup deleted core when node start

2018-04-03 Thread Cao Manh Dat (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-12066?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cao Manh Dat resolved SOLR-12066.
-
Resolution: Fixed

> Cleanup deleted core when node start
> 
>
> Key: SOLR-12066
> URL: https://issues.apache.org/jira/browse/SOLR-12066
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: AutoScaling, SolrCloud
>Reporter: Varun Thacker
>Assignee: Cao Manh Dat
>Priority: Major
> Fix For: 7.4, master (8.0)
>
> Attachments: SOLR-12066.patch, SOLR-12066.patch
>
>
> Initially when SOLR-12047 was created it looked like waiting for a state in 
> ZK for only 3 seconds was the culprit for cores not loading up
>  
> But it turns out to be something else. Here are the steps to reproduce this 
> problem
>  
>  - create a 3 node cluster
>  - create a 1 shard X 2 replica collection to use node1 and node2 ( 
> [http://localhost:8983/solr/admin/collections?action=create&name=test_node_lost&numShards=1&nrtReplicas=2&autoAddReplicas=true]
>  )
>  - stop node 2 : ./bin/solr stop -p 7574
>  - Solr will create a new replica on node3 after 30 seconds because of the 
> ".auto_add_replicas" trigger
>  - At this point state.json has info about replicas being on node1 and node3
>  - Start node2. Bam!
> {code:java}
> java.util.concurrent.ExecutionException: 
> org.apache.solr.common.SolrException: Unable to create core 
> [test_node_lost_shard1_replica_n2]
> ...
> Caused by: org.apache.solr.common.SolrException: Unable to create core 
> [test_node_lost_shard1_replica_n2]
> at 
> org.apache.solr.core.CoreContainer.createFromDescriptor(CoreContainer.java:1053)
> ...
> Caused by: org.apache.solr.common.SolrException: 
> at org.apache.solr.cloud.ZkController.preRegister(ZkController.java:1619)
> at 
> org.apache.solr.core.CoreContainer.createFromDescriptor(CoreContainer.java:1030)
> ...
> Caused by: org.apache.solr.common.SolrException: coreNodeName core_node4 does 
> not exist in shard shard1: 
> DocCollection(test_node_lost//collections/test_node_lost/state.json/12)={
> ...{code}
>  
> The practical effect of this is not big since the move replica has already 
> put the replica on another JVM . But to the user it's super confusing on 
> what's happening. He can never get rid of this error unless he manually 
> cleans up the data directory on node2 and restart
>  
> Please note: I chose autoAddReplicas=true to reproduce this, but a user could 
> be using a node lost trigger and run into the same issue



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-12176) Improve FORCELEADER to handle the case when a replica win the election but does not present in clusterstate

2018-04-03 Thread Cao Manh Dat (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12176?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424913#comment-16424913
 ] 

Cao Manh Dat commented on SOLR-12176:
-

I will commit soon if no one has any objection.

> Improve FORCELEADER to handle the case when a replica win the election but 
> does not present in clusterstate
> ---
>
> Key: SOLR-12176
> URL: https://issues.apache.org/jira/browse/SOLR-12176
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>Reporter: Cao Manh Dat
>Assignee: Cao Manh Dat
>Priority: Major
> Attachments: SOLR-12176.patch
>
>
> There can be the case when a replica wins the election but it is not 
> present in the clusterstate. Maybe when the Overseer sent the UNLOAD request to 
> the LEADER (in DeleteReplicaCmd), it met some exception (therefore the 
> request never reached the LEADER); the Overseer in that case will forcefully 
> remove the LEADER from the clusterstate. 
> If a shard reaches that case, users will only see a leaderless shard and call 
> FORCELEADER won't be able to solve their problem. Therefore FORCELEADER 
> should be more robust, to handle such cases.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Created] (SOLR-12185) Can't change Single Valued field to Multi Valued even by deleting/readding

2018-04-03 Thread Cetra Free (JIRA)
Cetra Free created SOLR-12185:
-

 Summary: Can't change Single Valued field to Multi Valued even by 
deleting/readding
 Key: SOLR-12185
 URL: https://issues.apache.org/jira/browse/SOLR-12185
 Project: Solr
  Issue Type: Bug
  Security Level: Public (Default Security Level. Issues are Public)
  Components: Schema and Analysis
Affects Versions: 7.1
Reporter: Cetra Free


Changing a single-valued field to multi-valued field with doc values breaks 
things.  This doesn't matter if you change the field or do a complete delete 
and re-add of the field.  The only way I have found to "fix" this is to delete 
the entire core from disk and re-add it.
h2. Steps to replicate:
 * Create a field, make it single valued with doc values
 * Index a couple of docs
 * Delete the field
 * Add the field again with the same name, but change it to multiValued
 * Try indexing a couple of docs

h2. Expected result:

The documents are indexed correctly and there are no issues
h2. Actual outcome:

The documents refuse to be indexed and you see this in the logs:
{code:java}
org.apache.solr.common.SolrException: Exception writing document id 
6a3226c8-c904-40d7-aecb-76c3515db7b8 to the index; possible analysis error: 
cannot change DocValues type from SORTED to SORTED_SET for field "example_field"
    at 
org.apache.solr.update.DirectUpdateHandler2.addDoc(DirectUpdateHandler2.java:221)
    at 
org.apache.solr.update.processor.RunUpdateProcessor.processAdd(RunUpdateProcessorFactory.java:67)
    at 
org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
    at 
org.apache.solr.update.processor.DistributedUpdateProcessor.doLocalAdd(DistributedUpdateProcessor.java:991)
    at 
org.apache.solr.update.processor.DistributedUpdateProcessor.versionAdd(DistributedUpdateProcessor.java:1207)
    at 
org.apache.solr.update.processor.DistributedUpdateProcessor.processAdd(DistributedUpdateProcessor.java:753)
    at 
org.apache.solr.update.processor.LogUpdateProcessorFactory$LogUpdateProcessor.processAdd(LogUpdateProcessorFactory.java:103)
    at 
org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
    at 
org.apache.solr.update.processor.AddSchemaFieldsUpdateProcessorFactory$AddSchemaFieldsUpdateProcessor.processAdd(AddSchemaFieldsUpdateProcessorFactory.java:474)
    at 
org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
    at 
org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
    at 
org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
    at 
org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
    at 
org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
    at 
org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
    at 
org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
    at 
org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
    at 
org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
    at 
org.apache.solr.update.processor.FieldNameMutatingUpdateProcessorFactory$1.processAdd(FieldNameMutatingUpdateProcessorFactory.java:74)
    at 
org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
    at 
org.apache.solr.update.processor.FieldMutatingUpdateProcessor.processAdd(FieldMutatingUpdateProcessor.java:118)
    at 
org.apache.solr.update.processor.UpdateRequestProcessor.processAdd(UpdateRequestProcessor.java:55)
    at 
org.apache.solr.update.processor.AbstractDefaultValueUpdateProcessorFactory$DefaultValueUpdateProcessor.processAdd(AbstractDefaultValueUpdateProcessorFactory.java:91)
    at 
org.apache.solr.handler.dataimport.SolrWriter.upload(SolrWriter.java:80)
    at 
org.apache.solr.handler.dataimport.DataImportHandler$1.upload(DataImportHandler.java:257)
    at 
org.apache.solr.handler.dataimport.DocBuilder.buildDocument(DocBuilder.java:527)
    at 
org.apache.solr.handler.dataimport.DocBuilder.buildDocument(DocBuilder.java:415)
    at 
org.apache.solr.handler.dataimport.DocBuilder.doFullDump(DocBuilder.java:330)
    at 
org.apache.solr.handler.dataimport.DocBuilder.execute(DocBuilder.java:233)
    at 
org.apache.solr.handler.dataimport.DataImporter.doFullImport(DataImporter.java:415)
    at 
org.apache.solr.handler.dataimport.DataImporter.runCmd(DataImporter.java:474)
    at 
org.apache.solr.ha

[jira] [Resolved] (SOLR-12154) Disallow Log4j2 explicit usage via forbidden APIs

2018-04-03 Thread Varun Thacker (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-12154?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Varun Thacker resolved SOLR-12154.
--
Resolution: Fixed

Until INFRA-15850 is resolved the user tagged with the commit will not be me 

> Disallow Log4j2 explicit usage via forbidden APIs
> -
>
> Key: SOLR-12154
> URL: https://issues.apache.org/jira/browse/SOLR-12154
> Project: Solr
>  Issue Type: Sub-task
>Reporter: Varun Thacker
>Assignee: Varun Thacker
>Priority: Blocker
> Fix For: 7.4
>
> Attachments: SOLR-12154.patch, SOLR-12154.patch
>
>
> We need to add org.apache.logging.log4j.** to forbidden APIs
> From [Tomás|https://reviews.apache.org/users/tflobbe/] on the reviewboard 
> discussion ( [https://reviews.apache.org/r/65888/] ) 
> {quote} We *don't* do log4j calls in the code in general, we have that 
> explicitly forbidden in forbidden APIS today, and code that does something 
> with log4j has to suppress that. Developers must instead use slf4j APIs. I 
> don't believe that's changing now with log4j2, or does it?
> {quote}
> We need to address this before 7.4 to make sure we don't break anything by 
> using Log4j2 directly 
> After SOLR-7887 the following classes explicitly import the 
> org.apache.logging.log4j.** package so let's validate it's usage
> - Log4j2Watcher
> - SolrLogLayout
> - StartupLoggingUtils
> - RequestLoggingTest
> - LoggingHandlerTest
> - SolrTestCaseJ4
> - TestLogLevelAnnotations
> - LogLevel



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-7887) Upgrade Solr to use log4j2 -- log4j 1 now officially end of life

2018-04-03 Thread ASF subversion and git services (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-7887?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424856#comment-16424856
 ] 

ASF subversion and git services commented on SOLR-7887:
---

Commit 8d9d821c3416136ef3830e48e2d15fefbf3ef058 in lucene-solr's branch 
refs/heads/branch_7x from [~varun_saxena]
[ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=8d9d821 ]

SOLR-7887: Fix logging filePattern to use solr.log.X format

(cherry picked from commit 41a1cbe)


> Upgrade Solr to use log4j2 -- log4j 1 now officially end of life
> 
>
> Key: SOLR-7887
> URL: https://issues.apache.org/jira/browse/SOLR-7887
> Project: Solr
>  Issue Type: Task
>Reporter: Shawn Heisey
>Assignee: Erick Erickson
>Priority: Major
> Fix For: 7.4
>
> Attachments: SOLR-7887-WIP.patch, SOLR-7887-eoe-review.patch, 
> SOLR-7887-eoe-review.patch, SOLR-7887-followup_1.patch, SOLR-7887.patch, 
> SOLR-7887.patch, SOLR-7887.patch, SOLR-7887.patch, SOLR-7887.patch, 
> SOLR-7887.patch, SOLR-7887.patch, SOLR-7887.patch, SOLR-7887.patch, 
> SOLR-7887.patch, SOLR-7887.patch, SOLR-7887.patch, SOLR-7887.patch, 
> SOLR-7887_followup_2.patch, SOLR-7887_followup_2.patch
>
>
> The logging services project has officially announced the EOL of log4j 1:
> https://blogs.apache.org/foundation/entry/apache_logging_services_project_announces
> In the official binary jetty deployment, we use use log4j 1.2 as our final 
> logging destination, so the admin UI has a log watcher that actually uses 
> log4j and java.util.logging classes.  That will need to be extended to add 
> log4j2.  I think that might be the largest pain point to this upgrade.
> There is some crossover between log4j2 and slf4j.  Figuring out exactly which 
> jars need to be in the lib/ext directory will take some research.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-12154) Disallow Log4j2 explicit usage via forbidden APIs

2018-04-03 Thread ASF subversion and git services (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12154?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424857#comment-16424857
 ] 

ASF subversion and git services commented on SOLR-12154:


Commit 3f2a37dc74f67d701fa3607be010793733c72f0e in lucene-solr's branch 
refs/heads/branch_7x from [~varun_saxena]
[ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=3f2a37d ]

SOLR-12154: Disallow explicit usage of Log4j2 logger via forbidden APIs


> Disallow Log4j2 explicit usage via forbidden APIs
> -
>
> Key: SOLR-12154
> URL: https://issues.apache.org/jira/browse/SOLR-12154
> Project: Solr
>  Issue Type: Sub-task
>Reporter: Varun Thacker
>Assignee: Varun Thacker
>Priority: Blocker
> Fix For: 7.4
>
> Attachments: SOLR-12154.patch, SOLR-12154.patch
>
>
> We need to add org.apache.logging.log4j.** to forbidden APIs
> From [Tomás|https://reviews.apache.org/users/tflobbe/] on the reviewboard 
> discussion ( [https://reviews.apache.org/r/65888/] ) 
> {quote} We *don't* do log4j calls in the code in general, we have that 
> explicitly forbidden in forbidden APIS today, and code that does something 
> with log4j has to suppress that. Developers must instead use slf4j APIs. I 
> don't believe that's changing now with log4j2, or does it?
> {quote}
> We need to address this before 7.4 to make sure we don't break anything by 
> using Log4j2 directly 
> After SOLR-7887 the following classes explicitly import the 
> org.apache.logging.log4j.** package so let's validate it's usage
> - Log4j2Watcher
> - SolrLogLayout
> - StartupLoggingUtils
> - RequestLoggingTest
> - LoggingHandlerTest
> - SolrTestCaseJ4
> - TestLogLevelAnnotations
> - LogLevel



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (LUCENE-7976) Add a parameter to TieredMergePolicy to merge segments that have more than X percent deleted documents

2018-04-03 Thread Yonik Seeley (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-7976?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424851#comment-16424851
 ] 

Yonik Seeley commented on LUCENE-7976:
--

bq.  I further propose that this be an optional argument to the command that 
would override the setting in solrconfig.xml (if any). WDYT?

+1
Seems like a good way to enable minor compactions during peak hours and major 
compactions off-peak. 

> Add a parameter to TieredMergePolicy to merge segments that have more than X 
> percent deleted documents
> --
>
> Key: LUCENE-7976
> URL: https://issues.apache.org/jira/browse/LUCENE-7976
> Project: Lucene - Core
>  Issue Type: Improvement
>Reporter: Erick Erickson
>Assignee: Erick Erickson
>Priority: Major
> Attachments: LUCENE-7976.patch
>
>
> We're seeing situations "in the wild" where there are very large indexes (on 
> disk) handled quite easily in a single Lucene index. This is particularly 
> true as features like docValues move data into MMapDirectory space. The 
> current TMP algorithm allows on the order of 50% deleted documents as per a 
> dev list conversation with Mike McCandless (and his blog here:  
> https://www.elastic.co/blog/lucenes-handling-of-deleted-documents).
> Especially in the current era of very large indexes in aggregate, (think many 
> TB) solutions like "you need to distribute your collection over more shards" 
> become very costly. Additionally, the tempting "optimize" button exacerbates 
> the issue since once you form, say, a 100G segment (by 
> optimizing/forceMerging) it is not eligible for merging until 97.5G of the 
> docs in it are deleted (current default 5G max segment size).
> The proposal here would be to add a new parameter to TMP, something like 
>  (no, that's not a serious name, suggestions 
> welcome) which would default to 100 (or the same behavior we have now).
> So if I set this parameter to, say, 20%, and the max segment size stays at 
> 5G, the following would happen when segments were selected for merging:
> > any segment with > 20% deleted documents would be merged or rewritten NO 
> > MATTER HOW LARGE. There are two cases,
> >> the segment has < 5G "live" docs. In that case it would be merged with 
> >> smaller segments to bring the resulting segment up to 5G. If no smaller 
> >> segments exist, it would just be rewritten
> >> The segment has > 5G "live" docs (the result of a forceMerge or optimize). 
> >> It would be rewritten into a single segment removing all deleted docs no 
> >> matter how big it is to start. The 100G example above would be rewritten 
> >> to an 80G segment for instance.
> Of course this would lead to potentially much more I/O which is why the 
> default would be the same behavior we see now. As it stands now, though, 
> there's no way to recover from an optimize/forceMerge except to re-index from 
> scratch. We routinely see 200G-300G Lucene indexes at this point "in the 
> wild" with 10s of  shards replicated 3 or more times. And that doesn't even 
> include having these over HDFS.
> Alternatives welcome! Something like the above seems minimally invasive. A 
> new merge policy is certainly an alternative.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-12154) Disallow Log4j2 explicit usage via forbidden APIs

2018-04-03 Thread ASF subversion and git services (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12154?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424834#comment-16424834
 ] 

ASF subversion and git services commented on SOLR-12154:


Commit 56f80c0dc72fe8ac81c4af37c0da1b4d04cc7097 in lucene-solr's branch 
refs/heads/master from [~varun_saxena]
[ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=56f80c0 ]

SOLR-12154: Disallow explicit usage of Log4j2 logger via forbidden APIs


> Disallow Log4j2 explicit usage via forbidden APIs
> -
>
> Key: SOLR-12154
> URL: https://issues.apache.org/jira/browse/SOLR-12154
> Project: Solr
>  Issue Type: Sub-task
>Reporter: Varun Thacker
>Assignee: Varun Thacker
>Priority: Blocker
> Fix For: 7.4
>
> Attachments: SOLR-12154.patch, SOLR-12154.patch
>
>
> We need to add org.apache.logging.log4j.** to forbidden APIs
> From [Tomás|https://reviews.apache.org/users/tflobbe/] on the reviewboard 
> discussion ( [https://reviews.apache.org/r/65888/] ) 
> {quote} We *don't* do log4j calls in the code in general, we have that 
> explicitly forbidden in forbidden APIS today, and code that does something 
> with log4j has to suppress that. Developers must instead use slf4j APIs. I 
> don't believe that's changing now with log4j2, or does it?
> {quote}
> We need to address this before 7.4 to make sure we don't break anything by 
> using Log4j2 directly 
> After SOLR-7887 the following classes explicitly import the 
> org.apache.logging.log4j.** package so let's validate its usage
> - Log4j2Watcher
> - SolrLogLayout
> - StartupLoggingUtils
> - RequestLoggingTest
> - LoggingHandlerTest
> - SolrTestCaseJ4
> - TestLogLevelAnnotations
> - LogLevel



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS-MAVEN] Lucene-Solr-Maven-7.x #172: POMs out of sync

2018-04-03 Thread Apache Jenkins Server
Build: https://builds.apache.org/job/Lucene-Solr-Maven-7.x/172/

No tests ran.

Build Log:
[...truncated 31626 lines...]
  [mvn] [INFO] -
  [mvn] [INFO] -
  [mvn] [ERROR] COMPILATION ERROR : 
  [mvn] [INFO] -

[...truncated 204 lines...]
BUILD FAILED
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Maven-7.x/build.xml:679: The 
following error occurred while executing this line:
: Java returned: 1

Total time: 14 minutes 48 seconds
Build step 'Invoke Ant' marked build as failure
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org

[jira] [Commented] (LUCENE-7976) Add a parameter to TieredMergePolicy to merge segments that have more than X percent deleted documents

2018-04-03 Thread Erick Erickson (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-7976?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424814#comment-16424814
 ] 

Erick Erickson commented on LUCENE-7976:


OK, I'm working on this today, [~mikemccand], thanks for the hints.

Next up is making forceMerge respect maxMergedSegmentSizeMB. I further propose 
that this be an _optional_ argument to the command that would override the 
setting in solrconfig.xml (if any). WDYT?

Note this is a significant change in behavior from the perspective that someone 
does a forcemerge and then will ask "What? I didn't get one segment when I was 
done!". Of course putting it in CHANGES.txt and the ref guide is indicated.

> if a person does  forcemerge, then there are two parameters
> maxMergedSegmentsSizeMB
> maxSegments

> maxMergedSegmentsSizeMB overrides maxSegments if both are specified
> if only one is specified, it's respected.
> if neither are specified then whatever TMP was configured with is used.


> Add a parameter to TieredMergePolicy to merge segments that have more than X 
> percent deleted documents
> --
>
> Key: LUCENE-7976
> URL: https://issues.apache.org/jira/browse/LUCENE-7976
> Project: Lucene - Core
>  Issue Type: Improvement
>Reporter: Erick Erickson
>Assignee: Erick Erickson
>Priority: Major
> Attachments: LUCENE-7976.patch
>
>
> We're seeing situations "in the wild" where there are very large indexes (on 
> disk) handled quite easily in a single Lucene index. This is particularly 
> true as features like docValues move data into MMapDirectory space. The 
> current TMP algorithm allows on the order of 50% deleted documents as per a 
> dev list conversation with Mike McCandless (and his blog here:  
> https://www.elastic.co/blog/lucenes-handling-of-deleted-documents).
> Especially in the current era of very large indexes in aggregate, (think many 
> TB) solutions like "you need to distribute your collection over more shards" 
> become very costly. Additionally, the tempting "optimize" button exacerbates 
> the issue since once you form, say, a 100G segment (by 
> optimizing/forceMerging) it is not eligible for merging until 97.5G of the 
> docs in it are deleted (current default 5G max segment size).
> The proposal here would be to add a new parameter to TMP, something like 
>  (no, that's not a serious name, suggestions 
> welcome) which would default to 100 (or the same behavior we have now).
> So if I set this parameter to, say, 20%, and the max segment size stays at 
> 5G, the following would happen when segments were selected for merging:
> > any segment with > 20% deleted documents would be merged or rewritten NO 
> > MATTER HOW LARGE. There are two cases,
> >> the segment has < 5G "live" docs. In that case it would be merged with 
> >> smaller segments to bring the resulting segment up to 5G. If no smaller 
> >> segments exist, it would just be rewritten
> >> The segment has > 5G "live" docs (the result of a forceMerge or optimize). 
> >> It would be rewritten into a single segment removing all deleted docs no 
> >> matter how big it is to start. The 100G example above would be rewritten 
> >> to an 80G segment for instance.
> Of course this would lead to potentially much more I/O which is why the 
> default would be the same behavior we see now. As it stands now, though, 
> there's no way to recover from an optimize/forceMerge except to re-index from 
> scratch. We routinely see 200G-300G Lucene indexes at this point "in the 
> wild" with 10s of  shards replicated 3 or more times. And that doesn't even 
> include having these over HDFS.
> Alternatives welcome! Something like the above seems minimally invasive. A 
> new merge policy is certainly an alternative.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS-EA] Lucene-Solr-7.3-Linux (64bit/jdk-11-ea+5) - Build # 117 - Unstable!

2018-04-03 Thread Policeman Jenkins Server
Error processing tokens: Error while parsing action 
'Text/ZeroOrMore/FirstOf/Token/DelimitedToken/DelimitedToken_Action3' at input 
position (line 79, pos 4):
)"}
   ^

java.lang.OutOfMemoryError: Java heap space

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org

[jira] [Created] (SOLR-12184) Master/Slave configuration exposes Basic Auth password in plain text.

2018-04-03 Thread Syed B. Ahmed (JIRA)
Syed B. Ahmed created SOLR-12184:


 Summary: Master/Slave configuration exposes Basic Auth password in 
plain text. 
 Key: SOLR-12184
 URL: https://issues.apache.org/jira/browse/SOLR-12184
 Project: Solr
  Issue Type: Bug
  Security Level: Public (Default Security Level. Issues are Public)
  Components: replication (java)
Affects Versions: 7.2
Reporter: Syed B. Ahmed


Copying my original question and reply from Shawn Heisey.
{quote}Seems even when we use Security.json with BasicAuthentication Plugin as 
documented here -- 
[https://lucene.apache.org/solr/guide/7_2/basic-authentication-plugin.html]
, which nicely encrypts the user password using SHA256 encryption,  when it 
comes to configuring{quote}
{quote}Please let me know how I can use the same encrypted password as in 
Security.json when setting up Master/Slave Replication for Solr.{quote}
 
At the moment, the cleartext password is the only way it can be configured.
 
It is not possible to use the same string that goes in security.json for
a feature like replication.  That string is a one-way hash of the
password, so it cannot be decrypted.  The replication handler must be
able to obtain the cleartext password.
 
The DIH feature offers password encryption for database passwords. 
Scroll down a little bit on the following page to the description
numbered "2":
 
[https://lucene.apache.org/solr/guide/6_6/uploading-structured-data-store-data-with-the-data-import-handler.html#configuring-the-dih-configuration-file]
 
The replication handler CAN be enhanced to use the same kind of
encryption.  Note that this is merely security through obscurity.  If
whoever is looking at the configuration also has access to the key file,
then they will be able to decrypt the password.
 
Can you file an enhancement issue in Jira to add this capability to
other handlers like replication?
 
 
 
 
 
Hello,
Seems even when we use Security.json with BasicAuthentication Plugin as 
documented here -- 
[https://lucene.apache.org/solr/guide/7_2/basic-authentication-plugin.html]
, which nicely encrypts the user password using SHA256 encryption,  when it 
comes to configuring the slave in a Master/Slave Index Replication Strategy, 
the slave config requires to give the
BasicAuthentication password in plain text?  Is it something I got wrong?  But 
in my setup of HA with Master/Slave replication it works in this manner.
 
[https://lucene.apache.org/solr/guide/7_2/index-replication.html]  this also 
indicates the config is in plain text.
 

 
username
password
 
 
Please let me know how I can use the same encrypted password as in 
Security.json when setting up Master/Slave Replication for Solr.
 
Thx
-Syed Ahmed.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



Re: VOTE: Apache Solr Reference Guide for Solr 7.3 RC1

2018-04-03 Thread Anshum Gupta
+1

On Tue, Apr 3, 2018 at 2:25 PM Tomas Fernandez Lobbe 
wrote:

> +1
>
>
> On Apr 3, 2018, at 12:45 PM, Varun Thacker  wrote:
>
> +1
>
> On Tue, Apr 3, 2018 at 10:47 AM, Steve Rowe  wrote:
>
>> +1
>>
>> --
>> Steve
>> www.lucidworks.com
>>
>> > On Apr 3, 2018, at 10:06 AM, Mikhail Khludnev  wrote:
>> >
>> > I've looked through recent changes in PDF. It seems good.
>> >
>> > On Tue, Apr 3, 2018 at 4:32 PM, Cassandra Targett <
>> casstarg...@gmail.com> wrote:
>> > Reminder about this.
>> >
>> > It looks like the Lucene/Solr release vote is going to pass, so we
>> could have both released at about the same time.
>> >
>> > Thanks,
>> > Cassandra
>> >
>> > On Thu, Mar 29, 2018 at 10:49 AM, Cassandra Targett <
>> casstarg...@gmail.com> wrote:
>> > Please vote to release the Apache Solr Reference Guide for Solr 7.3.
>> >
>> > The artifacts can be downloaded from:
>> >
>> https://dist.apache.org/repos/dist/dev/lucene/solr/ref-guide/apache-solr-ref-guide-7.3-RC1/
>> >
>> > $ cat apache-solr-ref-guide-7.3.pdf.sha1
>> > 151f06d920d1ac41564f3c0ddabae3c2c36b6892  apache-solr-ref-guide-7.3.pdf
>> >
>> > The HTML version has also been uploaded to the website:
>> > https://lucene.apache.org/solr/guide/7_3/
>> >
>> > Here's my +1.
>> >
>> > If it happens that this vote passes before the vote for the final
>> Lucene/Solr RC is complete, I'll hold release/announcement of the Ref Guide
>> until the vote is complete and the release steps are finished.
>> >
>> > Thanks,
>> > Cassandra
>> >
>> >
>> >
>> >
>> > --
>> > Sincerely yours
>> > Mikhail Khludnev
>>
>>
>> -
>> To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
>> For additional commands, e-mail: dev-h...@lucene.apache.org
>>
>>
>
>


[jira] [Commented] (SOLR-12172) Race condition in collection properties can cause invalid cache of properties

2018-04-03 Thread JIRA

[ 
https://issues.apache.org/jira/browse/SOLR-12172?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424785#comment-16424785
 ] 

Tomás Fernández Löbbe commented on SOLR-12172:
--

[~shalinmangar], just by keeping the synchronization I added to 
{{refreshAndWatch}} in the previous commit we can guarantee that we won't be 
setting the collection property map to an older value, however, I don't think 
we can guarantee that the notifications to watchers won't be out of order 
without using the single thread executor. Are you suggesting that we go that 
way anyway?


> Race condition in collection properties can cause invalid cache of properties
> -
>
> Key: SOLR-12172
> URL: https://issues.apache.org/jira/browse/SOLR-12172
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: Tests
>Reporter: Tomás Fernández Löbbe
>Assignee: Tomás Fernández Löbbe
>Priority: Minor
> Fix For: 7.4, master (8.0)
>
> Attachments: SOLR-12172.patch
>
>
> From: https://builds.apache.org/job/Lucene-Solr-BadApples-Tests-master/24
> {noformat}
> java.lang.AssertionError: Could not see value change after setting collection 
> property. Name: property2, current value: value2, expected value: newValue
>   at 
> __randomizedtesting.SeedInfo.seed([1BCE6473A2A5E68A:FD89A9BD30939A79]:0)
>   at org.junit.Assert.fail(Assert.java:93)
>   at 
> org.apache.solr.cloud.CollectionPropsTest.waitForValue(CollectionPropsTest.java:146)
>   at 
> org.apache.solr.cloud.CollectionPropsTest.testReadWriteCached(CollectionPropsTest.java:115){noformat}



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS] Lucene-Solr-master-Windows (64bit/jdk-10) - Build # 7252 - Still Unstable!

2018-04-03 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Windows/7252/
Java: 64bit/jdk-10 -XX:+UseCompressedOops -XX:+UseG1GC

8 tests failed.
FAILED:  org.apache.solr.handler.TestReplicationHandler.doTestStressReplication

Error Message:
found:2[index.20180404015240914, index.20180404015241823, index.properties, 
replication.properties, snapshot_metadata]

Stack Trace:
java.lang.AssertionError: found:2[index.20180404015240914, 
index.20180404015241823, index.properties, replication.properties, 
snapshot_metadata]
at 
__randomizedtesting.SeedInfo.seed([CD5DAE6ED7143F:DB665D686BFF7D8C]:0)
at org.junit.Assert.fail(Assert.java:93)
at org.junit.Assert.assertTrue(Assert.java:43)
at 
org.apache.solr.handler.TestReplicationHandler.checkForSingleIndex(TestReplicationHandler.java:963)
at 
org.apache.solr.handler.TestReplicationHandler.checkForSingleIndex(TestReplicationHandler.java:934)
at 
org.apache.solr.handler.TestReplicationHandler.doTestStressReplication(TestReplicationHandler.java:910)
at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:564)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementR

[jira] [Commented] (SOLR-11929) TestRecovery failures

2018-04-03 Thread Steve Rowe (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-11929?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424784#comment-16424784
 ] 

Steve Rowe commented on SOLR-11929:
---

I traced the problem to the {{testReload()}} method being run prior to the 
other failing test methods; when I comment out its body all tests succeed with 
all seeds.  

The NPE in {{testBuffering()}} is caused by the {{TLOG.state}} metric not being 
present, apparently because {{UpdateLog.initializeMetrics()}} is never called 
on core reload.

I've attached a patch that initializes {{UpdateLog}} metrics upon core reload, 
and this allows all tests to pass with the seeds I have for this problem 
(another one not listed above: {{F999698B6BBE3431}}, from 
https://jenkins.thetaphi.de/job/Lucene-Solr-master-MacOSX/4542/ ).

[~ab], can you take a look and see if the patch is doing the right thing here?


> TestRecovery failures
> -
>
> Key: SOLR-11929
> URL: https://issues.apache.org/jira/browse/SOLR-11929
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>Reporter: Steve Rowe
>Priority: Major
> Attachments: SOLR-11929.patch
>
>
> My Jenkins found a branch_7x seed for {{TestRecovery.testBuffering()}} and 
> {{TestRecovery.testCorruptLog()}} that reproduces for me 5/5 times (when I 
> exclude {{-Dtests.method=...}} from the cmdline):
> {noformat}
> Checking out Revision 1ef988a26378137b1e1f022985dacee1f557f4fc 
> (refs/remotes/origin/branch_7x)
> [...]
>[junit4]   2> NOTE: reproduce with: ant test  -Dtestcase=TestRecovery 
> -Dtests.method=testBuffering -Dtests.seed=FC96FD26F8A8CC6F -Dtests.slow=true 
> -Dtests.locale=de-GR -Dtests.timezone=Europe/London -Dtests.asserts=true 
> -Dtests.file.encoding=UTF-8
>[junit4] FAILURE 0.02s J3  | TestRecovery.testBuffering <<<
>[junit4]> Throwable #1: java.lang.AssertionError: expected:<1> but 
> was:<3>
>[junit4]>  at 
> __randomizedtesting.SeedInfo.seed([FC96FD26F8A8CC6F:E178530D59F16D44]:0)
>[junit4]>  at 
> org.apache.solr.search.TestRecovery.testBuffering(TestRecovery.java:494)
>[junit4]>  at java.lang.Thread.run(Thread.java:748)
> [...]
>[junit4]   2> NOTE: reproduce with: ant test  -Dtestcase=TestRecovery 
> -Dtests.method=testCorruptLog -Dtests.seed=FC96FD26F8A8CC6F -Dtests.slow=true 
> -Dtests.locale=de-GR -Dtests.timezone=Europe/London -Dtests.asserts=true 
> -Dtests.file.encoding=UTF-8
>[junit4] ERROR   0.35s J3  | TestRecovery.testCorruptLog <<<
>[junit4]> Throwable #1: java.lang.RuntimeException: mismatch: '3'!='0' 
> @ response/numFound
>[junit4]>  at 
> __randomizedtesting.SeedInfo.seed([FC96FD26F8A8CC6F:E4B49F502909DB3]:0)
>[junit4]>  at 
> org.apache.solr.SolrTestCaseJ4.assertJQ(SolrTestCaseJ4.java:990)
>[junit4]>  at 
> org.apache.solr.SolrTestCaseJ4.assertJQ(SolrTestCaseJ4.java:937)
>[junit4]>  at 
> org.apache.solr.search.TestRecovery.testCorruptLog(TestRecovery.java:1367)
>[junit4]>  at java.lang.Thread.run(Thread.java:748)
> [...]
>[junit4]   2> NOTE: test params are: codec=Asserting(Lucene70): 
> {_root_=PostingsFormat(name=LuceneVarGapFixedInterval), 
> id=PostingsFormat(name=Direct)}, 
> docValues:{_version_=DocValuesFormat(name=Lucene70), 
> val_i_dvo=DocValuesFormat(name=Memory), val_i=DocValuesFormat(name=Memory)}, 
> maxPointsInLeafNode=1937, maxMBSortInHeap=7.529691259992591, 
> sim=RandomSimilarity(queryNorm=false): {}, locale=de-GR, 
> timezone=Europe/London
>[junit4]   2> NOTE: Linux 4.1.0-custom2-amd64 amd64/Oracle Corporation 
> 1.8.0_151 (64-bit)/cpus=16,threads=1,free=217064096,total=530579456
> {noformat}



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-11929) TestRecovery failures

2018-04-03 Thread Steve Rowe (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-11929?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Steve Rowe updated SOLR-11929:
--
Attachment: SOLR-11929.patch

> TestRecovery failures
> -
>
> Key: SOLR-11929
> URL: https://issues.apache.org/jira/browse/SOLR-11929
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>Reporter: Steve Rowe
>Priority: Major
> Attachments: SOLR-11929.patch
>
>
> My Jenkins found a branch_7x seed for {{TestRecovery.testBuffering()}} and 
> {{TestRecovery.testCorruptLog()}} that reproduces for me 5/5 times (when I 
> exclude {{-Dtests.method=...}} from the cmdline):
> {noformat}
> Checking out Revision 1ef988a26378137b1e1f022985dacee1f557f4fc 
> (refs/remotes/origin/branch_7x)
> [...]
>[junit4]   2> NOTE: reproduce with: ant test  -Dtestcase=TestRecovery 
> -Dtests.method=testBuffering -Dtests.seed=FC96FD26F8A8CC6F -Dtests.slow=true 
> -Dtests.locale=de-GR -Dtests.timezone=Europe/London -Dtests.asserts=true 
> -Dtests.file.encoding=UTF-8
>[junit4] FAILURE 0.02s J3  | TestRecovery.testBuffering <<<
>[junit4]> Throwable #1: java.lang.AssertionError: expected:<1> but 
> was:<3>
>[junit4]>  at 
> __randomizedtesting.SeedInfo.seed([FC96FD26F8A8CC6F:E178530D59F16D44]:0)
>[junit4]>  at 
> org.apache.solr.search.TestRecovery.testBuffering(TestRecovery.java:494)
>[junit4]>  at java.lang.Thread.run(Thread.java:748)
> [...]
>[junit4]   2> NOTE: reproduce with: ant test  -Dtestcase=TestRecovery 
> -Dtests.method=testCorruptLog -Dtests.seed=FC96FD26F8A8CC6F -Dtests.slow=true 
> -Dtests.locale=de-GR -Dtests.timezone=Europe/London -Dtests.asserts=true 
> -Dtests.file.encoding=UTF-8
>[junit4] ERROR   0.35s J3  | TestRecovery.testCorruptLog <<<
>[junit4]> Throwable #1: java.lang.RuntimeException: mismatch: '3'!='0' 
> @ response/numFound
>[junit4]>  at 
> __randomizedtesting.SeedInfo.seed([FC96FD26F8A8CC6F:E4B49F502909DB3]:0)
>[junit4]>  at 
> org.apache.solr.SolrTestCaseJ4.assertJQ(SolrTestCaseJ4.java:990)
>[junit4]>  at 
> org.apache.solr.SolrTestCaseJ4.assertJQ(SolrTestCaseJ4.java:937)
>[junit4]>  at 
> org.apache.solr.search.TestRecovery.testCorruptLog(TestRecovery.java:1367)
>[junit4]>  at java.lang.Thread.run(Thread.java:748)
> [...]
>[junit4]   2> NOTE: test params are: codec=Asserting(Lucene70): 
> {_root_=PostingsFormat(name=LuceneVarGapFixedInterval), 
> id=PostingsFormat(name=Direct)}, 
> docValues:{_version_=DocValuesFormat(name=Lucene70), 
> val_i_dvo=DocValuesFormat(name=Memory), val_i=DocValuesFormat(name=Memory)}, 
> maxPointsInLeafNode=1937, maxMBSortInHeap=7.529691259992591, 
> sim=RandomSimilarity(queryNorm=false): {}, locale=de-GR, 
> timezone=Europe/London
>[junit4]   2> NOTE: Linux 4.1.0-custom2-amd64 amd64/Oracle Corporation 
> 1.8.0_151 (64-bit)/cpus=16,threads=1,free=217064096,total=530579456
> {noformat}



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-9241) Rebalance API for SolrCloud

2018-04-03 Thread Noble Paul (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-9241?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424767#comment-16424767
 ] 

Noble Paul commented on SOLR-9241:
--

This does not do resharding. BTW ,the autoscaling will have some kind of 
rebalance feature soon

> Rebalance API for SolrCloud
> ---
>
> Key: SOLR-9241
> URL: https://issues.apache.org/jira/browse/SOLR-9241
> Project: Solr
>  Issue Type: New Feature
>  Components: SolrCloud
>Affects Versions: 6.1
> Environment: Ubuntu, Mac OsX
>Reporter: Nitin Sharma
>Priority: Major
>  Labels: Cluster, SolrCloud
> Fix For: 6.1
>
> Attachments: Redistribute_After.jpeg, Redistribute_Before.jpeg, 
> Redistribute_call.jpeg, Replace_After.jpeg, Replace_Before.jpeg, 
> Replace_Call.jpeg, SOLR-9241-4.6.patch, SOLR-9241-6.1.patch
>
>   Original Estimate: 2,016h
>  Remaining Estimate: 2,016h
>
> This is the v1 of the patch for Solrcloud Rebalance api (as described in 
> http://engineering.bloomreach.com/solrcloud-rebalance-api/) , built at 
> Bloomreach by Nitin Sharma and Suruchi Shah. The goal of the API  is to 
> provide a zero downtime mechanism to perform data manipulation and  efficient 
> core allocation in solrcloud. This API was envisioned to be the base layer 
> that enables Solrcloud to be an auto scaling platform. (and work in unison 
> with other complementing monitoring and scaling features).
> Patch Status:
> ===
> The patch is work in progress and incremental. We have done a few rounds of 
> code clean up. We wanted to get the patch going first to get initial feed 
> back.  We will continue to work on making it more open source friendly and 
> easily testable.
>  Deployment Status:
> 
> The platform is deployed in production at bloomreach and has been battle 
> tested for large scale load. (millions of documents and hundreds of 
> collections).
>  Internals:
> =
> The internals of the API and performance : 
> http://engineering.bloomreach.com/solrcloud-rebalance-api/
> It is built on top of the admin collections API as an action (with various 
> flavors). At a high level, the rebalance api provides 2 constructs:
> Scaling Strategy:  Decides how to move the data.  Every flavor has multiple 
> options which can be reviewed in the api spec.
> Re-distribute  - Move around data in the cluster based on capacity/allocation.
> Auto Shard  - Dynamically shard a collection to any size.
> Smart Merge - Distributed Mode - Helps merging data from a larger shard setup 
> into smaller one.  (the source should be divisible by destination)
> Scale up -  Add replicas on the fly
> Scale Down - Remove replicas on the fly
> Allocation Strategy:  Decides where to put the data.  (Nodes with least 
> cores, Nodes that do not have this collection etc). Custom implementations 
> can be built on top as well. One other example is Availability Zone aware. 
> Distribute data such that every replica is placed on different availability 
> zone to support HA.
>  Detailed API Spec:
> 
>   https://github.com/bloomreach/solrcloud-rebalance-api
>  Contributors:
> =
>   Nitin Sharma
>   Suruchi Shah
>  Questions/Comments:
> =
>   You can reach me at nitin...@gmail.com



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS-EA] Lucene-Solr-master-Linux (64bit/jdk-11-ea+5) - Build # 21750 - Unstable!

2018-04-03 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/21750/
Java: 64bit/jdk-11-ea+5 -XX:-UseCompressedOops -XX:+UseParallelGC

1 tests failed.
FAILED:  org.apache.solr.cloud.AliasIntegrationTest.testModifyPropertiesV1

Error Message:
Unexpected status: HTTP/1.1 400 Bad Request

Stack Trace:
java.lang.AssertionError: Unexpected status: HTTP/1.1 400 Bad Request
at 
__randomizedtesting.SeedInfo.seed([59C801B98458B710:7A7980D9B932B5C7]:0)
at org.junit.Assert.fail(Assert.java:93)
at 
org.apache.solr.cloud.AliasIntegrationTest.assertSuccess(AliasIntegrationTest.java:320)
at 
org.apache.solr.cloud.AliasIntegrationTest.testModifyPropertiesV1(AliasIntegrationTest.java:253)
at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:564)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.base/java.lang.Thread.run(Thread.java:841)




Build Log:
[...truncated 1853 lines...]
   [junit4] JVM J0: stdout was not empty, see: 
/home/jenkins/workspace/Lucene-Solr-master-Linux/lucene/build/core/test/temp/junit4-J0-20180403_223129_2909765946219504538887.sysout
   [junit4

[jira] [Commented] (SOLR-12134) validate links to javadocs in ref-guide & hook all ref-guide validation into top level documentation/precommit

2018-04-03 Thread Hoss Man (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12134?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424735#comment-16424735
 ] 

Hoss Man commented on SOLR-12134:
-

I've pushed to master.  I'll leave it there a few days to make sure no one runs 
into any "false failures" before back porting to 7x.

> validate links to javadocs in ref-guide & hook all ref-guide validation into 
> top level documentation/precommit
> --
>
> Key: SOLR-12134
> URL: https://issues.apache.org/jira/browse/SOLR-12134
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: documentation
>Reporter: Hoss Man
>Assignee: Hoss Man
>Priority: Major
> Attachments: SOLR-12134.patch, SOLR-12134.patch, 
> nocommit.SOLR-12134.sample-failures.patch
>
>
> We've seen a couple problems come up recently where the ref-guide had broken 
> links to javadocs.
> In some cases these are because people made typos in java classnames / 
> pathnames while editing the docs - but in other cases the problems were that 
> the docs were correct at one point, but then later the class was 
> moved/renamed/removed, or had its access level downgraded from public to 
> private (after deprecation)
> I've worked up a patch with some ideas to help us catch these types of 
> mistakes - and in general to hook the "bare-bones HTML" validation (which 
> does not require jekyll or any non-ivy managed external dependencies) into 
> {{ant precommit}}
> Details to follow in comment/patch...



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-12134) validate links to javadocs in ref-guide & hook all ref-guide validation into top level documentation/precommit

2018-04-03 Thread ASF subversion and git services (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12134?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424730#comment-16424730
 ] 

ASF subversion and git services commented on SOLR-12134:


Commit c0709f113d78ee5e033edfef24e027bc63fa96f9 in lucene-solr's branch 
refs/heads/master from Chris Hostetter
[ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=c0709f1 ]

SOLR-12134: hook ref-guide 'bare-bones-html' validation into top level 
documentation target using relative javadoc URL prefixes that are now 
validated to point to real files


> validate links to javadocs in ref-guide & hook all ref-guide validation into 
> top level documentation/precommit
> --
>
> Key: SOLR-12134
> URL: https://issues.apache.org/jira/browse/SOLR-12134
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: documentation
>Reporter: Hoss Man
>Assignee: Hoss Man
>Priority: Major
> Attachments: SOLR-12134.patch, SOLR-12134.patch, 
> nocommit.SOLR-12134.sample-failures.patch
>
>
> We've seen a couple problems come up recently where the ref-guide had broken 
> links to javadocs.
> In some cases these are because people made typos in java classnames / 
> pathnames while editing the docs - but in other cases the problems were that 
> the docs were correct at one point, but then later the class was 
> moved/renamed/removed, or had its access level downgraded from public to 
> private (after deprecation)
> I've worked up a patch with some ideas to help us catch these types of 
> mistakes - and in general to hook the "bare-bones HTML" validation (which 
> does not require jekyll or any non-ivy managed external dependencies) into 
> {{ant precommit}}
> Details to follow in comment/patch...



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-6305) Ability to set the replication factor for index files created by HDFSDirectoryFactory

2018-04-03 Thread Boris Pasko (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-6305?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424712#comment-16424712
 ] 

Boris Pasko commented on SOLR-6305:
---

I have run
{code:java}
ant test{code}
and I see 1 test failure:
{code:java}
   [junit4] Tests with failures [seed: B470387B3BBAF803]:
   [junit4]   - 
org.apache.solr.cloud.CollectionsAPIDistributedZkTest.testCollect{code}
However, when this test is run in Eclipse, no errors reported. I assume this 
test is irrelevant and failing randomly.

 

> Ability to set the replication factor for index files created by 
> HDFSDirectoryFactory
> -
>
> Key: SOLR-6305
> URL: https://issues.apache.org/jira/browse/SOLR-6305
> Project: Solr
>  Issue Type: Improvement
>  Components: hdfs
> Environment: hadoop-2.2.0
>Reporter: Timothy Potter
>Priority: Major
> Attachments: 
> 0001-OIQ-23224-SOLR-6305-Fixed-SOLR-6305-by-reading-the-r.patch
>
>
> HdfsFileWriter doesn't allow us to create files in HDFS with a different 
> replication factor than the configured DFS default because it uses: 
> {{FsServerDefaults fsDefaults = fileSystem.getServerDefaults(path);}}
> Since we have two forms of replication going on when using 
> HDFSDirectoryFactory, it would be nice to be able to set the HDFS replication 
> factor for the Solr directories to a lower value than the default. I realize 
> this might reduce the chance of data locality but since Solr cores each have 
> their own path in HDFS, we should give operators the option to reduce it.
> My original thinking was to just use Hadoop setrep to customize the 
> replication factor, but that's a one-time shot and doesn't affect new files 
> created. For instance, I did:
> {{hadoop fs -setrep -R 1 solr49/coll1}}
> My default dfs replication is set to 3 ^^ I'm setting it to 1 just as an 
> example
> Then added some more docs to the coll1 and did:
> {{hadoop fs -stat %r solr49/hdfs1/core_node1/data/index/segments_3}}
> 3 <-- should be 1
> So it looks like new files don't inherit the repfact from their parent 
> directory.
> Not sure if we need to go as far as allowing different replication factor per 
> collection but that should be considered if possible.
> I looked at the Hadoop 2.2.0 code to see if there was a way to work through 
> this using the Configuration object but nothing jumped out at me ... and the 
> implementation for getServerDefaults(path) is just:
>   public FsServerDefaults getServerDefaults(Path p) throws IOException {
> return getServerDefaults();
>   }
> Path is ignored ;-)



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-6305) Ability to set the replication factor for index files created by HDFSDirectoryFactory

2018-04-03 Thread Boris Pasko (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-6305?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424704#comment-16424704
 ] 

Boris Pasko commented on SOLR-6305:
---

Here is the patch 
[^0001-OIQ-23224-SOLR-6305-Fixed-SOLR-6305-by-reading-the-r.patch] for solr 
6.6.3.

It is very simple. Instead of relying on server-provided default, reread the 
replication factor from DFS client config.
{code:java}
private static final OutputStream getOutputStream(FileSystem fileSystem, Path 
path) throws IOException {
    Configuration conf = fileSystem.getConf();
    FsServerDefaults fsDefaults = fileSystem.getServerDefaults(path);
+   short replication = fileSystem.getDefaultReplication(path);
    EnumSet flags = EnumSet.of(CreateFlag.CREATE,
    CreateFlag.OVERWRITE);
    if (Boolean.getBoolean(HDFS_SYNC_BLOCK)) {
  flags.add(CreateFlag.SYNC_BLOCK);
    }
    return fileSystem.create(path, FsPermission.getDefault()
    .applyUMask(FsPermission.getUMask(conf)), flags, fsDefaults
+   .getFileBufferSize(), replication, fsDefaults
    .getBlockSize(), null);
  }{code}
I have tested this on real hardware cluster and it generates files with 
replication factor set in /etc/hbase/conf/hdfs-site.xml (provided in 
solrconfig.xml).

I haven't found any HdfsFileWriter unit tests so haven't modified any. 

I'm running 'ant test' with the patch.

> Ability to set the replication factor for index files created by 
> HDFSDirectoryFactory
> -
>
> Key: SOLR-6305
> URL: https://issues.apache.org/jira/browse/SOLR-6305
> Project: Solr
>  Issue Type: Improvement
>  Components: hdfs
> Environment: hadoop-2.2.0
>Reporter: Timothy Potter
>Priority: Major
> Attachments: 
> 0001-OIQ-23224-SOLR-6305-Fixed-SOLR-6305-by-reading-the-r.patch
>
>
> HdfsFileWriter doesn't allow us to create files in HDFS with a different 
> replication factor than the configured DFS default because it uses: 
> {{FsServerDefaults fsDefaults = fileSystem.getServerDefaults(path);}}
> Since we have two forms of replication going on when using 
> HDFSDirectoryFactory, it would be nice to be able to set the HDFS replication 
> factor for the Solr directories to a lower value than the default. I realize 
> this might reduce the chance of data locality but since Solr cores each have 
> their own path in HDFS, we should give operators the option to reduce it.
> My original thinking was to just use Hadoop setrep to customize the 
> replication factor, but that's a one-time shot and doesn't affect new files 
> created. For instance, I did:
> {{hadoop fs -setrep -R 1 solr49/coll1}}
> My default dfs replication is set to 3 ^^ I'm setting it to 1 just as an 
> example
> Then added some more docs to the coll1 and did:
> {{hadoop fs -stat %r solr49/hdfs1/core_node1/data/index/segments_3}}
> 3 <-- should be 1
> So it looks like new files don't inherit the repfact from their parent 
> directory.
> Not sure if we need to go as far as allowing different replication factor per 
> collection but that should be considered if possible.
> I looked at the Hadoop 2.2.0 code to see if there was a way to work through 
> this using the Configuration object but nothing jumped out at me ... and the 
> implementation for getServerDefaults(path) is just:
>   public FsServerDefaults getServerDefaults(Path p) throws IOException {
> return getServerDefaults();
>   }
> Path is ignored ;-)



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-12172) Race condition in collection properties can cause invalid cache of properties

2018-04-03 Thread JIRA

[ 
https://issues.apache.org/jira/browse/SOLR-12172?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424689#comment-16424689
 ] 

Tomás Fernández Löbbe commented on SOLR-12172:
--

Thanks for the review [~shalinmangar]. I thought about doing something like 
that, but decided not to since it requires to keep something like a map with 
collection -> version and handling it made the code more complex. I'll put up a 
patch, maybe it's still better to go that route anyway

> Race condition in collection properties can cause invalid cache of properties
> -
>
> Key: SOLR-12172
> URL: https://issues.apache.org/jira/browse/SOLR-12172
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: Tests
>Reporter: Tomás Fernández Löbbe
>Assignee: Tomás Fernández Löbbe
>Priority: Minor
> Fix For: 7.4, master (8.0)
>
> Attachments: SOLR-12172.patch
>
>
> From: https://builds.apache.org/job/Lucene-Solr-BadApples-Tests-master/24
> {noformat}
> java.lang.AssertionError: Could not see value change after setting collection 
> property. Name: property2, current value: value2, expected value: newValue
>   at 
> __randomizedtesting.SeedInfo.seed([1BCE6473A2A5E68A:FD89A9BD30939A79]:0)
>   at org.junit.Assert.fail(Assert.java:93)
>   at 
> org.apache.solr.cloud.CollectionPropsTest.waitForValue(CollectionPropsTest.java:146)
>   at 
> org.apache.solr.cloud.CollectionPropsTest.testReadWriteCached(CollectionPropsTest.java:115){noformat}



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Reopened] (SOLR-12172) Race condition in collection properties can cause invalid cache of properties

2018-04-03 Thread JIRA

 [ 
https://issues.apache.org/jira/browse/SOLR-12172?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Tomás Fernández Löbbe reopened SOLR-12172:
--

> Race condition in collection properties can cause invalid cache of properties
> -
>
> Key: SOLR-12172
> URL: https://issues.apache.org/jira/browse/SOLR-12172
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: Tests
>Reporter: Tomás Fernández Löbbe
>Assignee: Tomás Fernández Löbbe
>Priority: Minor
> Fix For: 7.4, master (8.0)
>
> Attachments: SOLR-12172.patch
>
>
> From: https://builds.apache.org/job/Lucene-Solr-BadApples-Tests-master/24
> {noformat}
> java.lang.AssertionError: Could not see value change after setting collection 
> property. Name: property2, current value: value2, expected value: newValue
>   at 
> __randomizedtesting.SeedInfo.seed([1BCE6473A2A5E68A:FD89A9BD30939A79]:0)
>   at org.junit.Assert.fail(Assert.java:93)
>   at 
> org.apache.solr.cloud.CollectionPropsTest.waitForValue(CollectionPropsTest.java:146)
>   at 
> org.apache.solr.cloud.CollectionPropsTest.testReadWriteCached(CollectionPropsTest.java:115){noformat}



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-6305) Ability to set the replication factor for index files created by HDFSDirectoryFactory

2018-04-03 Thread Boris Pasko (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-6305?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Boris Pasko updated SOLR-6305:
--
Attachment: 0001-OIQ-23224-SOLR-6305-Fixed-SOLR-6305-by-reading-the-r.patch

> Ability to set the replication factor for index files created by 
> HDFSDirectoryFactory
> -
>
> Key: SOLR-6305
> URL: https://issues.apache.org/jira/browse/SOLR-6305
> Project: Solr
>  Issue Type: Improvement
>  Components: hdfs
> Environment: hadoop-2.2.0
>Reporter: Timothy Potter
>Priority: Major
> Attachments: 
> 0001-OIQ-23224-SOLR-6305-Fixed-SOLR-6305-by-reading-the-r.patch
>
>
> HdfsFileWriter doesn't allow us to create files in HDFS with a different 
> replication factor than the configured DFS default because it uses: 
> {{FsServerDefaults fsDefaults = fileSystem.getServerDefaults(path);}}
> Since we have two forms of replication going on when using 
> HDFSDirectoryFactory, it would be nice to be able to set the HDFS replication 
> factor for the Solr directories to a lower value than the default. I realize 
> this might reduce the chance of data locality but since Solr cores each have 
> their own path in HDFS, we should give operators the option to reduce it.
> My original thinking was to just use Hadoop setrep to customize the 
> replication factor, but that's a one-time shot and doesn't affect new files 
> created. For instance, I did:
> {{hadoop fs -setrep -R 1 solr49/coll1}}
> My default dfs replication is set to 3 ^^ I'm setting it to 1 just as an 
> example
> Then added some more docs to the coll1 and did:
> {{hadoop fs -stat %r solr49/hdfs1/core_node1/data/index/segments_3}}
> 3 <-- should be 1
> So it looks like new files don't inherit the repfact from their parent 
> directory.
> Not sure if we need to go as far as allowing different replication factor per 
> collection but that should be considered if possible.
> I looked at the Hadoop 2.2.0 code to see if there was a way to work through 
> this using the Configuration object but nothing jumped out at me ... and the 
> implementation for getServerDefaults(path) is just:
>   public FsServerDefaults getServerDefaults(Path p) throws IOException {
> return getServerDefaults();
>   }
> Path is ignored ;-)



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-12154) Disallow Log4j2 explicit usage via forbidden APIs

2018-04-03 Thread Varun Thacker (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12154?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424686#comment-16424686
 ] 

Varun Thacker commented on SOLR-12154:
--

Thanks Tomás for reviewing! I'll go ahead and commit this later today

> Disallow Log4j2 explicit usage via forbidden APIs
> -
>
> Key: SOLR-12154
> URL: https://issues.apache.org/jira/browse/SOLR-12154
> Project: Solr
>  Issue Type: Sub-task
>Reporter: Varun Thacker
>Assignee: Varun Thacker
>Priority: Blocker
> Fix For: 7.4
>
> Attachments: SOLR-12154.patch, SOLR-12154.patch
>
>
> We need to add org.apache.logging.log4j.** to forbidden APIs
> From [Tomás|https://reviews.apache.org/users/tflobbe/] on the reviewboard 
> discussion ( [https://reviews.apache.org/r/65888/] ) 
> {quote} We *don't* do log4j calls in the code in general, we have that 
> explicitly forbidden in forbidden APIS today, and code that does something 
> with log4j has to suppress that. Developers must instead use slf4j APIs. I 
> don't believe that's changing now with log4j2, or does it?
> {quote}
> We need to address this before 7.4 to make sure we don't break anything by 
> using Log4j2 directly 
> After SOLR-7887 the following classes explicitly import the 
> org.apache.logging.log4j.** package so let's validate it's usage
> - Log4j2Watcher
> - SolrLogLayout
> - StartupLoggingUtils
> - RequestLoggingTest
> - LoggingHandlerTest
> - SolrTestCaseJ4
> - TestLogLevelAnnotations
> - LogLevel



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-12154) Disallow Log4j2 explicit usage via forbidden APIs

2018-04-03 Thread JIRA

[ 
https://issues.apache.org/jira/browse/SOLR-12154?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424681#comment-16424681
 ] 

Tomás Fernández Löbbe commented on SOLR-12154:
--

+1
Thanks for addressing this.

> Disallow Log4j2 explicit usage via forbidden APIs
> -
>
> Key: SOLR-12154
> URL: https://issues.apache.org/jira/browse/SOLR-12154
> Project: Solr
>  Issue Type: Sub-task
>Reporter: Varun Thacker
>Assignee: Varun Thacker
>Priority: Blocker
> Fix For: 7.4
>
> Attachments: SOLR-12154.patch, SOLR-12154.patch
>
>
> We need to add org.apache.logging.log4j.** to forbidden APIs
> From [Tomás|https://reviews.apache.org/users/tflobbe/] on the reviewboard 
> discussion ( [https://reviews.apache.org/r/65888/] ) 
> {quote} We *don't* do log4j calls in the code in general, we have that 
> explicitly forbidden in forbidden APIS today, and code that does something 
> with log4j has to suppress that. Developers must instead use slf4j APIs. I 
> don't believe that's changing now with log4j2, or does it?
> {quote}
> We need to address this before 7.4 to make sure we don't break anything by 
> using Log4j2 directly 
> After SOLR-7887 the following classes explicitly import the 
> org.apache.logging.log4j.** package so let's validate it's usage
> - Log4j2Watcher
> - SolrLogLayout
> - StartupLoggingUtils
> - RequestLoggingTest
> - LoggingHandlerTest
> - SolrTestCaseJ4
> - TestLogLevelAnnotations
> - LogLevel



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Resolved] (SOLR-12165) Ref Guide: DisMax default mm param value is improperly documented as 100%

2018-04-03 Thread Steve Rowe (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-12165?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Steve Rowe resolved SOLR-12165.
---
   Resolution: Fixed
Fix Version/s: 7.4

> Ref Guide: DisMax default mm param value is improperly documented as 100%
> -
>
> Key: SOLR-12165
> URL: https://issues.apache.org/jira/browse/SOLR-12165
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: documentation
>Reporter: Steve Rowe
>Assignee: Steve Rowe
>Priority: Major
> Fix For: 7.4
>
> Attachments: SOLR-12165.patch
>
>
> {{DisMaxQParser.parseMinShouldMatch()}} sets default {{mm}} to 100% if 
> {{q.op}}=="AND", and to 0% otherwise.
> {{ExtendedDismaxQParser.parseOriginalQuery()}} sets default {{mm}} to 0% if 
> there are explicit operators other than "AND" in the query (see SOLR-2649 and 
> SOLR-8812), and otherwise falls through to dismax’s logic.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-12165) Ref Guide: DisMax default mm param value is improperly documented as 100%

2018-04-03 Thread ASF subversion and git services (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12165?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424668#comment-16424668
 ] 

ASF subversion and git services commented on SOLR-12165:


Commit 56834dfa2b6319990f1bebd2097afdc2e6c67a4c in lucene-solr's branch 
refs/heads/branch_7x from [~steve_rowe]
[ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=56834df ]

SOLR-12165: Ref Guide: DisMax default mm param value is improperly documented 
as 100%


> Ref Guide: DisMax default mm param value is improperly documented as 100%
> -
>
> Key: SOLR-12165
> URL: https://issues.apache.org/jira/browse/SOLR-12165
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: documentation
>Reporter: Steve Rowe
>Assignee: Steve Rowe
>Priority: Major
> Attachments: SOLR-12165.patch
>
>
> {{DisMaxQParser.parseMinShouldMatch()}} sets default {{mm}} to 100% if 
> {{q.op}}=="AND", and to 0% otherwise.
> {{ExtendedDismaxQParser.parseOriginalQuery()}} sets default {{mm}} to 0% if 
> there are explicit operators other than "AND" in the query (see SOLR-2649 and 
> SOLR-8812), and otherwise falls through to dismax’s logic.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-12165) Ref Guide: DisMax default mm param value is improperly documented as 100%

2018-04-03 Thread ASF subversion and git services (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12165?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424669#comment-16424669
 ] 

ASF subversion and git services commented on SOLR-12165:


Commit b87cbc2f75deb4e80b2b04fde6015369be6ad9cb in lucene-solr's branch 
refs/heads/master from [~steve_rowe]
[ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=b87cbc2 ]

SOLR-12165: Ref Guide: DisMax default mm param value is improperly documented 
as 100%


> Ref Guide: DisMax default mm param value is improperly documented as 100%
> -
>
> Key: SOLR-12165
> URL: https://issues.apache.org/jira/browse/SOLR-12165
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: documentation
>Reporter: Steve Rowe
>Assignee: Steve Rowe
>Priority: Major
> Attachments: SOLR-12165.patch
>
>
> {{DisMaxQParser.parseMinShouldMatch()}} sets default {{mm}} to 100% if 
> {{q.op}}=="AND", and to 0% otherwise.
> {{ExtendedDismaxQParser.parseOriginalQuery()}} sets default {{mm}} to 0% if 
> there are explicit operators other than "AND" in the query (see SOLR-2649 and 
> SOLR-8812), and otherwise falls through to dismax’s logic.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Deleted] (SOLR-12177) Zorlani search engine

2018-04-03 Thread Steve Rowe (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-12177?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Steve Rowe deleted SOLR-12177:
--


> Zorlani search engine
> -
>
> Key: SOLR-12177
> URL: https://issues.apache.org/jira/browse/SOLR-12177
> Project: Solr
>  Issue Type: New Feature
>  Security Level: Public(Default Security Level. Issues are Public) 
> Environment: search engine
>Reporter: fabrice
>Priority: Major
>  Labels: security
>
> Search engine



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-12019) Prepare Streaming Expressions for machine learning functions

2018-04-03 Thread Joel Bernstein (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-12019?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Joel Bernstein updated SOLR-12019:
--
Component/s: streaming expressions

> Prepare Streaming Expressions for machine learning functions
> 
>
> Key: SOLR-12019
> URL: https://issues.apache.org/jira/browse/SOLR-12019
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: streaming expressions
>Reporter: Joel Bernstein
>Assignee: Joel Bernstein
>Priority: Major
> Fix For: 7.4
>
>
> This ticket is to prepare the Streaming Expressions framework for the next 
> phase of development which will focus on *machine learning*.
> Because this next phase will involve a large number of new functions it will 
> be important to prepare the Streaming Expressions framework before getting 
> started.
> There are three main goals of the ticket:
> 1) Refactoring of code and test cases to prepare for the new machine learning 
> functions.
> 2) Improve the documentation of the current statistical functions and 
> refactor the docs so they can support the new machine learning functions.
> 3) Integrate the [http://haifengl.github.io/smile/] libraries. Now that the 
> *Apache Commons Math* integration is close to completion it's time to start on 
> the *Smile* machine learning integration.
>  



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-12019) Prepare Streaming Expressions for machine learning functions

2018-04-03 Thread Joel Bernstein (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-12019?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Joel Bernstein updated SOLR-12019:
--
Fix Version/s: 7.4

> Prepare Streaming Expressions for machine learning functions
> 
>
> Key: SOLR-12019
> URL: https://issues.apache.org/jira/browse/SOLR-12019
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: streaming expressions
>Reporter: Joel Bernstein
>Assignee: Joel Bernstein
>Priority: Major
> Fix For: 7.4
>
>
> This ticket is to prepare the Streaming Expressions framework for the next 
> phase of development which will focus on *machine learning*.
> Because this next phase will involve a large number of new functions it will 
> be important to prepare the Streaming Expressions framework before getting 
> started.
> There are three main goals of the ticket:
> 1) Refactoring of code and test cases to prepare for the new machine learning 
> functions.
> 2) Improve the documentation of the current statistical functions and 
> refactor the docs so they can support the new machine learning functions.
> 3) Integrate the [http://haifengl.github.io/smile/] libraries. Now that the 
> *Apache Commons Math* integration is close to completion it's time to start on 
> the *Smile* machine learning integration.
>  



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-12183) Refactor Streaming Expression test cases

2018-04-03 Thread Joel Bernstein (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-12183?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Joel Bernstein updated SOLR-12183:
--
Fix Version/s: 7.4

> Refactor Streaming Expression test cases
> 
>
> Key: SOLR-12183
> URL: https://issues.apache.org/jira/browse/SOLR-12183
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>Reporter: Joel Bernstein
>Assignee: Joel Bernstein
>Priority: Major
> Fix For: 7.4
>
>
> This ticket will break up the StreamExpressionTest into multiple smaller files 
> based on the following areas:
> 1) Stream Sources
> 2) Stream Decorators
> 3) Stream Evaluators (This may have to be broken up more in the future)



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-12183) Refactor Streaming Expression test cases

2018-04-03 Thread Joel Bernstein (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-12183?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Joel Bernstein updated SOLR-12183:
--
Summary: Refactor Streaming Expression test cases  (was: Refactor Streaming 
Expressions test cases)

> Refactor Streaming Expression test cases
> 
>
> Key: SOLR-12183
> URL: https://issues.apache.org/jira/browse/SOLR-12183
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>Reporter: Joel Bernstein
>Priority: Major
> Fix For: 7.4
>
>
> This ticket will break up the StreamExpressionTest into multiple smaller files 
> based on the following areas:
> 1) Stream Sources
> 2) Stream Decorators
> 3) Stream Evaluators (This may have to be broken up more in the future)



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Assigned] (SOLR-12183) Refactor Streaming Expression test cases

2018-04-03 Thread Joel Bernstein (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-12183?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Joel Bernstein reassigned SOLR-12183:
-

Assignee: Joel Bernstein

> Refactor Streaming Expression test cases
> 
>
> Key: SOLR-12183
> URL: https://issues.apache.org/jira/browse/SOLR-12183
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>Reporter: Joel Bernstein
>Assignee: Joel Bernstein
>Priority: Major
> Fix For: 7.4
>
>
> This ticket will break up the StreamExpressionTest into multiple smaller files 
> based on the following areas:
> 1) Stream Sources
> 2) Stream Decorators
> 3) Stream Evaluators (This may have to be broken up more in the future)



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Created] (SOLR-12183) Refactor Streaming Expressions test cases

2018-04-03 Thread Joel Bernstein (JIRA)
Joel Bernstein created SOLR-12183:
-

 Summary: Refactor Streaming Expressions test cases
 Key: SOLR-12183
 URL: https://issues.apache.org/jira/browse/SOLR-12183
 Project: Solr
  Issue Type: Improvement
  Security Level: Public (Default Security Level. Issues are Public)
Reporter: Joel Bernstein


This ticket will break up the StreamExpressionTest into multiple smaller files 
based on the following areas:

1) Stream Sources

2) Stream Decorators

3) Stream Evaluators (This may have to be broken up more in the future)



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



Re: VOTE: Apache Solr Reference Guide for Solr 7.3 RC1

2018-04-03 Thread Tomas Fernandez Lobbe
+1

> On Apr 3, 2018, at 12:45 PM, Varun Thacker  wrote:
> 
> +1
> 
> On Tue, Apr 3, 2018 at 10:47 AM, Steve Rowe  > wrote:
> +1
> 
> --
> Steve
> www.lucidworks.com 
> 
> > On Apr 3, 2018, at 10:06 AM, Mikhail Khludnev  > > wrote:
> >
> > I've looked through recent changes in PDF. It seems good.
> >
> > On Tue, Apr 3, 2018 at 4:32 PM, Cassandra Targett  > > wrote:
> > Reminder about this.
> >
> > It looks like the Lucene/Solr release vote is going to pass, so we could 
> > have both released at about the same time.
> >
> > Thanks,
> > Cassandra
> >
> > On Thu, Mar 29, 2018 at 10:49 AM, Cassandra Targett  > > wrote:
> > Please vote to release the Apache Solr Reference Guide for Solr 7.3.
> >
> > The artifacts can be downloaded from:
> > https://dist.apache.org/repos/dist/dev/lucene/solr/ref-guide/apache-solr-ref-guide-7.3-RC1/
> >  
> > 
> >
> > $ cat apache-solr-ref-guide-7.3.pdf.sha1
> > 151f06d920d1ac41564f3c0ddabae3c2c36b6892  apache-solr-ref-guide-7.3.pdf
> >
> > The HTML version has also been uploaded to the website:
> > https://lucene.apache.org/solr/guide/7_3/ 
> > 
> >
> > Here's my +1.
> >
> > If it happens that this vote passes before the vote for the final 
> > Lucene/Solr RC is complete, I'll hold release/announcement of the Ref Guide 
> > until the vote is complete and the release steps are finished.
> >
> > Thanks,
> > Cassandra
> >
> >
> >
> >
> > --
> > Sincerely yours
> > Mikhail Khludnev
> 
> 
> -
> To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org 
> 
> For additional commands, e-mail: dev-h...@lucene.apache.org 
> 
> 
> 



[jira] [Updated] (SOLR-12175) Add random field type and dynamic field to the default managed-schema

2018-04-03 Thread Joel Bernstein (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-12175?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Joel Bernstein updated SOLR-12175:
--
Attachment: SOLR-12175.patch

> Add random field type and dynamic field to the default managed-schema
> -
>
> Key: SOLR-12175
> URL: https://issues.apache.org/jira/browse/SOLR-12175
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>Reporter: Joel Bernstein
>Priority: Major
> Attachments: SOLR-12175.patch
>
>
> Currently the default manage-schema file doesn't have the random field 
> configured. Both the techproducts and example manage-schema files have it 
> configured. This ticket will add the random dynamic field and field type to 
> the default managed-schema so this functionality is available out of the box 
> when using the default schema.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS] Lucene-Solr-master-Solaris (64bit/jdk1.8.0) - Build # 1780 - Still Unstable!

2018-04-03 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Solaris/1780/
Java: 64bit/jdk1.8.0 -XX:+UseCompressedOops -XX:+UseParallelGC

21 tests failed.
FAILED:  
org.apache.solr.cloud.autoscaling.sim.TestTriggerIntegration.testTriggerThrottling

Error Message:
Both triggers should have fired by now

Stack Trace:
java.lang.AssertionError: Both triggers should have fired by now
at 
__randomizedtesting.SeedInfo.seed([BD0B4D436E7EE9EE:4629E566BCD40A7C]:0)
at org.junit.Assert.fail(Assert.java:93)
at 
org.apache.solr.cloud.autoscaling.sim.TestTriggerIntegration.testTriggerThrottling(TestTriggerIntegration.java:225)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.lang.Thread.run(Thread.java:748)


FAILED:  org.apache.solr.search.TestRecovery.testBuffering

Error Message:


Stack Trace:
java.lang.NullPointerException
at 
__randomizedtesting.SeedInfo.seed([BD0B4D436E7EE9EE:A0E5E368CF2748C5]:0)
at 
org.apache.solr.search.TestRecovery.testBuffering(TestRecovery

[JENKINS] Lucene-Solr-NightlyTests-master - Build # 1520 - Still Failing

2018-04-03 Thread Apache Jenkins Server
Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-master/1520/

1 tests failed.
FAILED:  org.apache.solr.cloud.hdfs.StressHdfsTest.test

Error Message:
Could not find collection:delete_data_dir

Stack Trace:
java.lang.AssertionError: Could not find collection:delete_data_dir
at 
__randomizedtesting.SeedInfo.seed([378D2E79D9079884:BFD911A377FBF57C]:0)
at org.junit.Assert.fail(Assert.java:93)
at org.junit.Assert.assertTrue(Assert.java:43)
at org.junit.Assert.assertNotNull(Assert.java:526)
at 
org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:155)
at 
org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:140)
at 
org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:135)
at 
org.apache.solr.cloud.AbstractFullDistribZkTestBase.waitForRecoveriesToFinish(AbstractFullDistribZkTestBase.java:915)
at 
org.apache.solr.cloud.hdfs.StressHdfsTest.test(StressHdfsTest.java:114)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:993)
at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:968)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
   

Re: [VOTE] Release Lucene/Solr 7.3.0 RC2

2018-04-03 Thread Alan Woodward
The vote has passed.  Thanks all for testing, I’ll upload the artefacts and 
send out announcements tomorrow.

> On 2 Apr 2018, at 17:26, Yonik Seeley  wrote:
> 
> +1
> 
> -Yonik
> 
> 
> On Wed, Mar 28, 2018 at 1:11 PM, Alan Woodward  wrote:
>> Please vote for release candidate 2 for Lucene/Solr 7.3.0
>> 
>> The artefacts can be downloaded from:
>> https://dist.apache.org/repos/dist/dev/lucene/lucene-solr-7.3.0-RC2-rev98a6b3d642928b1ac9076c6c5a369472581f7633
>> 
>> You can run the smoke tester directly with this command:
>> python3 -u dev-tools/scripts/smokeTestRelease.py
>> https://dist.apache.org/repos/dist/dev/lucene/lucene-solr-7.3.0-RC2-rev98a6b3d642928b1ac9076c6c5a369472581f7633
>> 
>> Here’s my +1
>> SUCCESS! [1:08:28.045253]
>> 
>> 
>> Note that this vote will be open a little longer than usual as it’s a Bank
>> Holiday weekend in the UK.  If there are no -1s, the vote will close on
>> Tuesday April 3rd.
> 
> -
> To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
> For additional commands, e-mail: dev-h...@lucene.apache.org
> 


-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



Re: VOTE: Apache Solr Reference Guide for Solr 7.3 RC1

2018-04-03 Thread Varun Thacker
+1

On Tue, Apr 3, 2018 at 10:47 AM, Steve Rowe  wrote:

> +1
>
> --
> Steve
> www.lucidworks.com
>
> > On Apr 3, 2018, at 10:06 AM, Mikhail Khludnev  wrote:
> >
> > I've looked through recent changes in PDF. It seems good.
> >
> > On Tue, Apr 3, 2018 at 4:32 PM, Cassandra Targett 
> wrote:
> > Reminder about this.
> >
> > It looks like the Lucene/Solr release vote is going to pass, so we could
> have both released at about the same time.
> >
> > Thanks,
> > Cassandra
> >
> > On Thu, Mar 29, 2018 at 10:49 AM, Cassandra Targett <
> casstarg...@gmail.com> wrote:
> > Please vote to release the Apache Solr Reference Guide for Solr 7.3.
> >
> > The artifacts can be downloaded from:
> > https://dist.apache.org/repos/dist/dev/lucene/solr/ref-
> guide/apache-solr-ref-guide-7.3-RC1/
> >
> > $ cat apache-solr-ref-guide-7.3.pdf.sha1
> > 151f06d920d1ac41564f3c0ddabae3c2c36b6892  apache-solr-ref-guide-7.3.pdf
> >
> > The HTML version has also been uploaded to the website:
> > https://lucene.apache.org/solr/guide/7_3/
> >
> > Here's my +1.
> >
> > If it happens that this vote passes before the vote for the final
> Lucene/Solr RC is complete, I'll hold release/announcement of the Ref Guide
> until the vote is complete and the release steps are finished.
> >
> > Thanks,
> > Cassandra
> >
> >
> >
> >
> > --
> > Sincerely yours
> > Mikhail Khludnev
>
>
> -
> To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
> For additional commands, e-mail: dev-h...@lucene.apache.org
>
>


[jira] [Commented] (SOLR-12134) validate links to javadocs in ref-guide & hook all ref-guide validation into top level documentation/precommit

2018-04-03 Thread Cassandra Targett (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12134?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424478#comment-16424478
 ] 

Cassandra Targett commented on SOLR-12134:
--

I'm finally getting around to reviewing this.

I applied the patch and ran several build-related commands (precommit, 
documentation, etc.). I also used the Ref Guide's {{ant default 
-Dlocal.javadocs=true}}, and had it in my head it would build the javadocs as 
part of it and it didn't so it failed ALL the javadoc checking, which I'm glad 
about in retrospect.

I think removing the PDF build from precommit and documentation targets was the 
right call - it does take a while and adding 5-10 minutes for something most 
people don't need as the output was a lot.

I've been trying to think of things missing or reasons not to do this as 
implemented, but can't. So +1 from me.

> validate links to javadocs in ref-guide & hook all ref-guide validation into 
> top level documentation/precommit
> --
>
> Key: SOLR-12134
> URL: https://issues.apache.org/jira/browse/SOLR-12134
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: documentation
>Reporter: Hoss Man
>Assignee: Hoss Man
>Priority: Major
> Attachments: SOLR-12134.patch, SOLR-12134.patch, 
> nocommit.SOLR-12134.sample-failures.patch
>
>
> We've seen a couple problems come up recently where the ref-guide had broken 
> links to javadocs.
> In some cases these are because people made typos in java classnames / 
> pathnames while editing the docs - but in other cases the problems were that 
> the docs were correct at one point, but then later the class was 
> moved/renamed/removed, or had its access level downgraded from public to 
> private (after deprecation)
> I've worked up a patch with some ideas to help us catch these types of 
> mistakes - and in general to hook the "bare-bones HTML" validation (which 
> does not require jekyll or any non-ivy managed external dependencies) into 
> {{ant precommit}}
> Details to follow in comment/patch...



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-5152) EdgeNGramFilterFactory deletes token

2018-04-03 Thread Shawn Heisey (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-5152?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424380#comment-16424380
 ] 

Shawn Heisey commented on SOLR-5152:


Linking as a duplicate of LUCENE-7960.  That issue is in the correct project.  
It solves the problem in a slightly different way that has more functionality.


> EdgeNGramFilterFactory deletes token
> 
>
> Key: SOLR-5152
> URL: https://issues.apache.org/jira/browse/SOLR-5152
> Project: Solr
>  Issue Type: Improvement
>Affects Versions: 4.4
>Reporter: Christoph Lingg
>Priority: Major
> Attachments: SOLR-5152-v5.0.0.patch, SOLR-5152.patch
>
>
> I am using EdgeNGramFilterFactory in my schema.xml
> {code:xml} positionIncrementGap="100">
>   
> 
>  maxGramSize="10" side="front" />
>   
> {code}
> Some tokens in my index only consist of one character, let's say {{R}}. 
> minGramSize is set to 2 and is bigger than the length of the token. I 
> expected the NGramFilter to leave {{R}} unchanged but in fact it is deleting 
> the token.
> For my use case this interpretation is undesirable, and probably for most use 
> cases too!?



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-5152) EdgeNGramFilterFactory deletes token

2018-04-03 Thread Shawn Heisey (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-5152?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424379#comment-16424379
 ] 

Shawn Heisey commented on SOLR-5152:


See LUCENE-7960.  Similar idea, but treats short and long tokens separately.


> EdgeNGramFilterFactory deletes token
> 
>
> Key: SOLR-5152
> URL: https://issues.apache.org/jira/browse/SOLR-5152
> Project: Solr
>  Issue Type: Improvement
>Affects Versions: 4.4
>Reporter: Christoph Lingg
>Priority: Major
> Attachments: SOLR-5152-v5.0.0.patch, SOLR-5152.patch
>
>
> I am using EdgeNGramFilterFactory in my schema.xml
> {code:xml} positionIncrementGap="100">
>   
> 
>  maxGramSize="10" side="front" />
>   
> {code}
> Some tokens in my index only consist of one character, let's say {{R}}. 
> minGramSize is set to 2 and is bigger than the length of the token. I 
> expected the NGramFilter to leave {{R}} unchanged but in fact it is deleting 
> the token.
> For my use case this interpretation is undesirable, and probably for most use 
> cases too!?



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Created] (SOLR-12182) Can not switch urlScheme in 7x if there are any cores in the cluster

2018-04-03 Thread Anshum Gupta (JIRA)
Anshum Gupta created SOLR-12182:
---

 Summary: Can not switch urlScheme in 7x if there are any cores in 
the cluster
 Key: SOLR-12182
 URL: https://issues.apache.org/jira/browse/SOLR-12182
 Project: Solr
  Issue Type: Bug
  Security Level: Public (Default Security Level. Issues are Public)
Affects Versions: 7.2, 7.1, 7.0
Reporter: Anshum Gupta


I was trying to enable TLS on a cluster that was already in use i.e. had 
existing collections and ended up with down cores, that wouldn't come up and 
the following core init errors in the logs:

*org.apache.solr.common.SolrException:org.apache.solr.common.SolrException: 
replica with coreNodeName core_node4 exists but with a different name or 
base_url.*

What is happening here is that the core/replica is defined in the clusterstate 
with the urlScheme as part of its base URL e.g. 
*"base_url":"http://hostname:port/solr"*.

Switching the urlScheme in Solr breaks this convention as the host now uses 
HTTPS instead.

Actually, I ran into this with an older version because I was running with 
*legacyCloud=false* and then realized that we switched that to the default 
behavior only in 7x, i.e. while most users did not hit this issue with older 
versions, unless they overrode the legacyCloud value explicitly, users running 
7x are bound to run into this more often.

Switching the value of legacyCloud to true, bouncing the cluster so that the 
clusterstate gets flushed, and then setting it back to false is a workaround 
but a bit risky one if you don't know if you have any old cores lying around.

Ideally, I think we shouldn't prepend the urlScheme to the base_url value and 
use the urlScheme on the fly to construct it.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



Re: VOTE: Apache Solr Reference Guide for Solr 7.3 RC1

2018-04-03 Thread Steve Rowe
+1

--
Steve
www.lucidworks.com

> On Apr 3, 2018, at 10:06 AM, Mikhail Khludnev  wrote:
> 
> I've looked through recent changes in PDF. It seems good. 
> 
> On Tue, Apr 3, 2018 at 4:32 PM, Cassandra Targett  
> wrote:
> Reminder about this. 
> 
> It looks like the Lucene/Solr release vote is going to pass, so we could have 
> both released at about the same time.
> 
> Thanks,
> Cassandra
> 
> On Thu, Mar 29, 2018 at 10:49 AM, Cassandra Targett  
> wrote:
> Please vote to release the Apache Solr Reference Guide for Solr 7.3.
> 
> The artifacts can be downloaded from:
> https://dist.apache.org/repos/dist/dev/lucene/solr/ref-guide/apache-solr-ref-guide-7.3-RC1/
> 
> $ cat apache-solr-ref-guide-7.3.pdf.sha1 
> 151f06d920d1ac41564f3c0ddabae3c2c36b6892  apache-solr-ref-guide-7.3.pdf
> 
> The HTML version has also been uploaded to the website:
> https://lucene.apache.org/solr/guide/7_3/
> 
> Here's my +1.
> 
> If it happens that this vote passes before the vote for the final Lucene/Solr 
> RC is complete, I'll hold release/announcement of the Ref Guide until the 
> vote is complete and the release steps are finished.
> 
> Thanks,
> Cassandra
> 
> 
> 
> 
> -- 
> Sincerely yours
> Mikhail Khludnev


-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Created] (SOLR-12181) Add trigger based on document count

2018-04-03 Thread Andrzej Bialecki (JIRA)
Andrzej Bialecki  created SOLR-12181:


 Summary: Add trigger based on document count
 Key: SOLR-12181
 URL: https://issues.apache.org/jira/browse/SOLR-12181
 Project: Solr
  Issue Type: Sub-task
  Security Level: Public (Default Security Level. Issues are Public)
  Components: AutoScaling
Reporter: Andrzej Bialecki 
Assignee: Andrzej Bialecki 


This may turn out to be as simple as using a {{MetricTrigger}} but it's likely 
this will require some specialization, and we may want to add this type of 
trigger anyway for convenience.

The two control actions associated with this trigger will be SPLITSHARD and 
(yet nonexistent) MERGESHARD.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Resolved] (SOLR-12179) ZKPropertiesWriter error DIH

2018-04-03 Thread Erick Erickson (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-12179?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Erick Erickson resolved SOLR-12179.
---
Resolution: Not A Problem

Please raise this question on the user's list at solr-u...@lucene.apache.org, 
see: (http://lucene.apache.org/solr/community.html#mailing-lists-irc) there are 
a _lot_ more people watching that list who may be able to help. 

If it's determined that this really is a code issue in Solr and not a 
configuration/usage problem, we can raise a new JIRA or reopen this one.

Since DIH is used in Cloud by quite a number of people, I strongly suspect this 
is a configuration issue rather than a code issue.

> ZKPropertiesWriter error DIH
> 
>
> Key: SOLR-12179
> URL: https://issues.apache.org/jira/browse/SOLR-12179
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: contrib - DataImportHandler
>Affects Versions: 6.6.1
> Environment: Debian
> Solr Cloud
>Reporter: Maxence SAUNIER
>Priority: Major
>
> Hello,
> I use Solr Cloud and I test DIH system in cloud, but I have this error :
> {quote}
> Full Import 
> failed:org.apache.solr.handler.dataimport.DataImportHandlerException: Unable 
> to PropertyWriter implementation:ZKPropertiesWriter
>   at 
> org.apache.solr.handler.dataimport.DataImporter.createPropertyWriter(DataImporter.java:330)
>   at 
> org.apache.solr.handler.dataimport.DataImporter.doFullImport(DataImporter.java:411)
>   at 
> org.apache.solr.handler.dataimport.DataImporter.runCmd(DataImporter.java:474)
>   at 
> org.apache.solr.handler.dataimport.DataImporter.lambda$runAsync$0(DataImporter.java:457)
>   at java.lang.Thread.run(Thread.java:748)
> Caused by: java.lang.NullPointerException
>   at 
> org.apache.solr.handler.dataimport.DocBuilder.loadClass(DocBuilder.java:935)
>   at 
> org.apache.solr.handler.dataimport.DataImporter.createPropertyWriter(DataImporter.java:326)
>   ... 4 more
> {quote}
> My DIH definition on the cloud
> {quote}
> 
>   driver="com.mysql.jdbc.Driver" 
> url="jdbc:mysql://srv-gesloc-sql/TRANSACTIONCITYANEWLOCATION" user="ics" 
> password="IcsPerms"
>   runtimeLib="true" version="1"/>
> 
>transformer="TemplateTransformer"
>   query="SELECT id,origin FROM view_indexation_advertisements" >
>
>
>   
>  
> 
> {quote}
> Call response :
> http://localhost:8983/solr/advertisements2/full-advertisements?command=full-import&clean=false&commit=true
> {quote}
> 
> 
> 0
> 2
> 
> 
> true
> 1
> 
> DIH/advertisements.xml
> 
> 
> full-import
> idle
> 
> 
> 
> {quote}
> I don't understand why I have this error. Can you help me?
> Thank you.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS] Lucene-Solr-SmokeRelease-7.x - Build # 190 - Still Failing

2018-04-03 Thread Apache Jenkins Server
Build: https://builds.apache.org/job/Lucene-Solr-SmokeRelease-7.x/190/

No tests ran.

Build Log:
[...truncated 30148 lines...]
prepare-release-no-sign:
[mkdir] Created dir: 
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/dist
 [copy] Copying 491 files to 
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/dist/lucene
 [copy] Copying 230 files to 
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/dist/solr
   [smoker] Java 1.8 JAVA_HOME=/home/jenkins/tools/java/latest1.8
   [smoker] Java 9 JAVA_HOME=/home/jenkins/tools/java/latest1.9
   [smoker] NOTE: output encoding is UTF-8
   [smoker] 
   [smoker] Load release URL 
"file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/dist/"...
   [smoker] 
   [smoker] Test Lucene...
   [smoker]   test basics...
   [smoker]   get KEYS
   [smoker] 0.2 MB in 0.01 sec (33.3 MB/sec)
   [smoker]   check changes HTML...
   [smoker]   download lucene-7.4.0-src.tgz...
   [smoker] 32.0 MB in 0.03 sec (1179.1 MB/sec)
   [smoker] verify sha1/sha512 digests
   [smoker]   download lucene-7.4.0.tgz...
   [smoker] 74.2 MB in 0.08 sec (916.6 MB/sec)
   [smoker] verify sha1/sha512 digests
   [smoker]   download lucene-7.4.0.zip...
   [smoker] 84.8 MB in 0.09 sec (944.7 MB/sec)
   [smoker] verify sha1/sha512 digests
   [smoker]   unpack lucene-7.4.0.tgz...
   [smoker] verify JAR metadata/identity/no javax.* or java.* classes...
   [smoker] test demo with 1.8...
   [smoker]   got 6322 hits for query "lucene"
   [smoker] checkindex with 1.8...
   [smoker] test demo with 9...
   [smoker]   got 6322 hits for query "lucene"
   [smoker] checkindex with 9...
   [smoker] check Lucene's javadoc JAR
   [smoker]   unpack lucene-7.4.0.zip...
   [smoker] verify JAR metadata/identity/no javax.* or java.* classes...
   [smoker] test demo with 1.8...
   [smoker]   got 6322 hits for query "lucene"
   [smoker] checkindex with 1.8...
   [smoker] test demo with 9...
   [smoker]   got 6322 hits for query "lucene"
   [smoker] checkindex with 9...
   [smoker] check Lucene's javadoc JAR
   [smoker]   unpack lucene-7.4.0-src.tgz...
   [smoker] make sure no JARs/WARs in src dist...
   [smoker] run "ant validate"
   [smoker] run tests w/ Java 8 and testArgs='-Dtests.badapples=false 
-Dtests.slow=false'...
   [smoker] test demo with 1.8...
   [smoker]   got 219 hits for query "lucene"
   [smoker] checkindex with 1.8...
   [smoker] generate javadocs w/ Java 8...
   [smoker] 
   [smoker] Crawl/parse...
   [smoker] 
   [smoker] Verify...
   [smoker] run tests w/ Java 9 and testArgs='-Dtests.badapples=false 
-Dtests.slow=false'...
   [smoker] test demo with 9...
   [smoker]   got 219 hits for query "lucene"
   [smoker] checkindex with 9...
   [smoker]   confirm all releases have coverage in TestBackwardsCompatibility
   [smoker] find all past Lucene releases...
   [smoker] run TestBackwardsCompatibility..
   [smoker] success!
   [smoker] 
   [smoker] Test Solr...
   [smoker]   test basics...
   [smoker]   get KEYS
   [smoker] 0.2 MB in 0.00 sec (262.9 MB/sec)
   [smoker]   check changes HTML...
   [smoker]   download solr-7.4.0-src.tgz...
   [smoker] 55.4 MB in 0.06 sec (893.8 MB/sec)
   [smoker] verify sha1/sha512 digests
   [smoker]   download solr-7.4.0.tgz...
   [smoker] 158.0 MB in 0.18 sec (892.8 MB/sec)
   [smoker] verify sha1/sha512 digests
   [smoker]   download solr-7.4.0.zip...
   [smoker] 159.0 MB in 0.27 sec (596.2 MB/sec)
   [smoker] verify sha1/sha512 digests
   [smoker]   unpack solr-7.4.0.tgz...
   [smoker] verify JAR metadata/identity/no javax.* or java.* classes...
   [smoker] unpack lucene-7.4.0.tgz...
   [smoker]   **WARNING**: skipping check of 
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/tmp/unpack/solr-7.4.0/contrib/dataimporthandler-extras/lib/javax.mail-1.5.1.jar:
 it has javax.* classes
   [smoker]   **WARNING**: skipping check of 
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/tmp/unpack/solr-7.4.0/contrib/dataimporthandler-extras/lib/activation-1.1.1.jar:
 it has javax.* classes
   [smoker] copying unpacked distribution for Java 8 ...
   [smoker] test solr example w/ Java 8...
   [smoker]   start Solr instance 
(log=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/tmp/unpack/solr-7.4.0-java8/solr-example.log)...
   [smoker] No process found for Solr node running on port 8983
   [smoker]   Running techproducts example on port 8983 from 
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/tmp/unp

[jira] [Resolved] (SOLR-11670) Implement a periodic house-keeping task

2018-04-03 Thread Andrzej Bialecki (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-11670?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Andrzej Bialecki  resolved SOLR-11670.
--
Resolution: Fixed

> Implement a periodic house-keeping task
> ---
>
> Key: SOLR-11670
> URL: https://issues.apache.org/jira/browse/SOLR-11670
> Project: Solr
>  Issue Type: Sub-task
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: AutoScaling
>Reporter: Andrzej Bialecki 
>Assignee: Andrzej Bialecki 
>Priority: Major
> Fix For: 7.4, master (8.0)
>
> Attachments: SOLR-11670.patch, SOLR-11670.patch, SOLR-11670.patch
>
>
> Some high-impact cluster changes (such as split shard) leave the original 
> data and original state that is no longer actively used. This makes sense due 
> to safety reasons and to make it easier to roll-back the changes.
> However, this unused data will accumulate over time, especially when actions 
> like split shard are invoked automatically by the autoscaling framework. We 
> need a periodic task that would clean up this kind of data after a certain 
> period.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Resolved] (SOLR-12095) AutoScalingHandler should validate triggers before updating zookeeper

2018-04-03 Thread Andrzej Bialecki (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-12095?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Andrzej Bialecki  resolved SOLR-12095.
--
Resolution: Fixed

> AutoScalingHandler should validate triggers before updating zookeeper
> -
>
> Key: SOLR-12095
> URL: https://issues.apache.org/jira/browse/SOLR-12095
> Project: Solr
>  Issue Type: Sub-task
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: AutoScaling, SolrCloud
>Reporter: Shalin Shekhar Mangar
>Assignee: Andrzej Bialecki 
>Priority: Major
> Fix For: 7.4, master (8.0)
>
>
> We validate policy and preferences before updating the configuration in 
> Zookeeper but we don't do that today for triggers. So users can put wrong or 
> unknown parameters and there won't be any complaints from the API, but at 
> runtime exceptions will be thrown/logged.
> We should change the trigger API to have a validation step. The catch here is 
> that it may require us to instantiate the trigger class.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Resolved] (SOLR-12180) Add a metric for number of deleted documents

2018-04-03 Thread Andrzej Bialecki (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-12180?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Andrzej Bialecki  resolved SOLR-12180.
--
Resolution: Invalid

Turns out this is already available as a core metric 
{{SEARCHER.searcher.deletedDocs}}.

> Add a metric for number of deleted documents
> 
>
> Key: SOLR-12180
> URL: https://issues.apache.org/jira/browse/SOLR-12180
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: metrics
>Reporter: Andrzej Bialecki 
>Assignee: Andrzej Bialecki 
>Priority: Minor
> Fix For: 7.4, master (8.0)
>
>




--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-12095) AutoScalingHandler should validate triggers before updating zookeeper

2018-04-03 Thread ASF subversion and git services (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12095?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424302#comment-16424302
 ] 

ASF subversion and git services commented on SOLR-12095:


Commit 606cfd57f1377ec0483424911305c60c4d021dca in lucene-solr's branch 
refs/heads/branch_7x from [~ab]
[ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=606cfd5 ]

SOLR-12095: Fix precommit issues.


> AutoScalingHandler should validate triggers before updating zookeeper
> -
>
> Key: SOLR-12095
> URL: https://issues.apache.org/jira/browse/SOLR-12095
> Project: Solr
>  Issue Type: Sub-task
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: AutoScaling, SolrCloud
>Reporter: Shalin Shekhar Mangar
>Assignee: Andrzej Bialecki 
>Priority: Major
> Fix For: 7.4, master (8.0)
>
>
> We validate policy and preferences before updating the configuration in 
> Zookeeper but we don't do that today for triggers. So users can put wrong or 
> unknown parameters and there won't be any complaints from the API, but at 
> runtime exceptions will be thrown/logged.
> We should change the trigger API to have a validation step. The catch here is 
> that it may require us to instantiate the trigger class.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-12095) AutoScalingHandler should validate triggers before updating zookeeper

2018-04-03 Thread ASF subversion and git services (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12095?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424301#comment-16424301
 ] 

ASF subversion and git services commented on SOLR-12095:


Commit 7c0d50c6c4446e2894b81a3c5c9fdcf045b85c4c in lucene-solr's branch 
refs/heads/branch_7x from [~ab]
[ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=7c0d50c ]

SOLR-12095: AutoScalingHandler should validate triggers before updating 
zookeeper.


> AutoScalingHandler should validate triggers before updating zookeeper
> -
>
> Key: SOLR-12095
> URL: https://issues.apache.org/jira/browse/SOLR-12095
> Project: Solr
>  Issue Type: Sub-task
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: AutoScaling, SolrCloud
>Reporter: Shalin Shekhar Mangar
>Assignee: Andrzej Bialecki 
>Priority: Major
> Fix For: 7.4, master (8.0)
>
>
> We validate policy and preferences before updating the configuration in 
> Zookeeper but we don't do that today for triggers. So users can put wrong or 
> unknown parameters and there won't be any complaints from the API, but at 
> runtime exceptions will be thrown/logged.
> We should change the trigger API to have a validation step. The catch here is 
> that it may require us to instantiate the trigger class.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Created] (SOLR-12180) Add a metric for number of deleted documents

2018-04-03 Thread Andrzej Bialecki (JIRA)
Andrzej Bialecki  created SOLR-12180:


 Summary: Add a metric for number of deleted documents
 Key: SOLR-12180
 URL: https://issues.apache.org/jira/browse/SOLR-12180
 Project: Solr
  Issue Type: Improvement
  Security Level: Public (Default Security Level. Issues are Public)
  Components: metrics
Reporter: Andrzej Bialecki 
Assignee: Andrzej Bialecki 
 Fix For: 7.4, master (8.0)






--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS] Lucene-Solr-master-MacOSX (64bit/jdk1.8.0) - Build # 4546 - Failure!

2018-04-03 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-MacOSX/4546/
Java: 64bit/jdk1.8.0 -XX:-UseCompressedOops -XX:+UseConcMarkSweepGC

All tests passed

Build Log:
[...truncated 1847 lines...]
   [junit4] JVM J1: stdout was not empty, see: 
/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/lucene/build/core/test/temp/junit4-J1-20180403_145004_2573674824249463372027.sysout
   [junit4] >>> JVM J1 emitted unexpected output (verbatim) 
   [junit4] codec: FastCompressingStoredFields, pf: BloomFilter, dvf: Memory
   [junit4] <<< JVM J1: EOF 

[...truncated 58947 lines...]
-ecj-javadoc-lint-src:
[mkdir] Created dir: 
/var/folders/qg/h2dfw5s161s51l2bn79mrb7rgn/T/ecj1401601959
 [ecj-lint] Compiling 1169 source files to 
/var/folders/qg/h2dfw5s161s51l2bn79mrb7rgn/T/ecj1401601959
 [ecj-lint] Processing annotations
 [ecj-lint] Annotations processed
 [ecj-lint] Processing annotations
 [ecj-lint] No elements to process
 [ecj-lint] invalid Class-Path header in manifest of jar file: 
/Users/jenkins/.ivy2/cache/org.restlet.jee/org.restlet/jars/org.restlet-2.3.0.jar
 [ecj-lint] invalid Class-Path header in manifest of jar file: 
/Users/jenkins/.ivy2/cache/org.restlet.jee/org.restlet.ext.servlet/jars/org.restlet.ext.servlet-2.3.0.jar
 [ecj-lint] --
 [ecj-lint] 1. ERROR in 
/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/core/src/java/org/apache/solr/cloud/autoscaling/NodeLostTrigger.java
 (at line 32)
 [ecj-lint] import org.apache.solr.client.solrj.cloud.SolrCloudManager;
 [ecj-lint]^^^
 [ecj-lint] The import org.apache.solr.client.solrj.cloud.SolrCloudManager is 
never used
 [ecj-lint] --
 [ecj-lint] 2. ERROR in 
/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/core/src/java/org/apache/solr/cloud/autoscaling/NodeLostTrigger.java
 (at line 36)
 [ecj-lint] import org.apache.solr.core.SolrResourceLoader;
 [ecj-lint]^^^
 [ecj-lint] The import org.apache.solr.core.SolrResourceLoader is never used
 [ecj-lint] --
 [ecj-lint] --
 [ecj-lint] 3. ERROR in 
/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerUtils.java
 (at line 20)
 [ecj-lint] import java.util.Collection;
 [ecj-lint]
 [ecj-lint] The import java.util.Collection is never used
 [ecj-lint] --
 [ecj-lint] 3 problems (3 errors)

BUILD FAILED
/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/build.xml:633: The following 
error occurred while executing this line:
/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/build.xml:101: The following 
error occurred while executing this line:
/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build.xml:685: The 
following error occurred while executing this line:
/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/lucene/common-build.xml:2089:
 The following error occurred while executing this line:
/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/lucene/common-build.xml:2128:
 Compile failed; see the compiler error output for details.

Total time: 109 minutes 1 second
Build step 'Invoke Ant' marked build as failure
Archiving artifacts
Setting 
ANT_1_8_2_HOME=/Users/jenkins/tools/hudson.tasks.Ant_AntInstallation/ANT_1.8.2
[WARNINGS] Skipping publisher since build result is FAILURE
Recording test results
Setting 
ANT_1_8_2_HOME=/Users/jenkins/tools/hudson.tasks.Ant_AntInstallation/ANT_1.8.2
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any
Setting 
ANT_1_8_2_HOME=/Users/jenkins/tools/hudson.tasks.Ant_AntInstallation/ANT_1.8.2
Setting 
ANT_1_8_2_HOME=/Users/jenkins/tools/hudson.tasks.Ant_AntInstallation/ANT_1.8.2
Setting 
ANT_1_8_2_HOME=/Users/jenkins/tools/hudson.tasks.Ant_AntInstallation/ANT_1.8.2
Setting 
ANT_1_8_2_HOME=/Users/jenkins/tools/hudson.tasks.Ant_AntInstallation/ANT_1.8.2

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org

[jira] [Commented] (SOLR-6305) Ability to set the replication factor for index files created by HDFSDirectoryFactory

2018-04-03 Thread Boris Pasko (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-6305?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424254#comment-16424254
 ] 

Boris Pasko commented on SOLR-6305:
---

This bug has not been fixed for years now. Solr 6.6.3 exhibits the same behavior. 

> Ability to set the replication factor for index files created by 
> HDFSDirectoryFactory
> -
>
> Key: SOLR-6305
> URL: https://issues.apache.org/jira/browse/SOLR-6305
> Project: Solr
>  Issue Type: Improvement
>  Components: hdfs
> Environment: hadoop-2.2.0
>Reporter: Timothy Potter
>Priority: Major
>
> HdfsFileWriter doesn't allow us to create files in HDFS with a different 
> replication factor than the configured DFS default because it uses: 
> {{FsServerDefaults fsDefaults = fileSystem.getServerDefaults(path);}}
> Since we have two forms of replication going on when using 
> HDFSDirectoryFactory, it would be nice to be able to set the HDFS replication 
> factor for the Solr directories to a lower value than the default. I realize 
> this might reduce the chance of data locality but since Solr cores each have 
> their own path in HDFS, we should give operators the option to reduce it.
> My original thinking was to just use Hadoop setrep to customize the 
> replication factor, but that's a one-time shot and doesn't affect new files 
> created. For instance, I did:
> {{hadoop fs -setrep -R 1 solr49/coll1}}
> My default dfs replication is set to 3 ^^ I'm setting it to 1 just as an 
> example
> Then added some more docs to the coll1 and did:
> {{hadoop fs -stat %r solr49/hdfs1/core_node1/data/index/segments_3}}
> 3 <-- should be 1
> So it looks like new files don't inherit the repfact from their parent 
> directory.
> Not sure if we need to go as far as allowing different replication factor per 
> collection but that should be considered if possible.
> I looked at the Hadoop 2.2.0 code to see if there was a way to work through 
> this using the Configuration object but nothing jumped out at me ... and the 
> implementation for getServerDefaults(path) is just:
>   public FsServerDefaults getServerDefaults(Path p) throws IOException {
> return getServerDefaults();
>   }
> Path is ignored ;-)



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Created] (SOLR-12179) ZKPropertiesWriter error DIH

2018-04-03 Thread Maxence SAUNIER (JIRA)
Maxence SAUNIER created SOLR-12179:
--

 Summary: ZKPropertiesWriter error DIH
 Key: SOLR-12179
 URL: https://issues.apache.org/jira/browse/SOLR-12179
 Project: Solr
  Issue Type: Bug
  Security Level: Public (Default Security Level. Issues are Public)
  Components: contrib - DataImportHandler
Affects Versions: 6.6.1
 Environment: Debian
Solr Cloud
Reporter: Maxence SAUNIER


Hello,
I use Solr Cloud and I test DIH system in cloud, but I have this error :

{quote}
Full Import 
failed:org.apache.solr.handler.dataimport.DataImportHandlerException: Unable to 
PropertyWriter implementation:ZKPropertiesWriter
at 
org.apache.solr.handler.dataimport.DataImporter.createPropertyWriter(DataImporter.java:330)
at 
org.apache.solr.handler.dataimport.DataImporter.doFullImport(DataImporter.java:411)
at 
org.apache.solr.handler.dataimport.DataImporter.runCmd(DataImporter.java:474)
at 
org.apache.solr.handler.dataimport.DataImporter.lambda$runAsync$0(DataImporter.java:457)
at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.NullPointerException
at 
org.apache.solr.handler.dataimport.DocBuilder.loadClass(DocBuilder.java:935)
at 
org.apache.solr.handler.dataimport.DataImporter.createPropertyWriter(DataImporter.java:326)
... 4 more
{quote}

My DIH definition on the cloud
{quote}


 



  

   
   

  

 


{quote}

Call response :
http://localhost:8983/solr/advertisements2/full-advertisements?command=full-import&clean=false&commit=true
{quote}


0
2


true
1

DIH/advertisements.xml


full-import
idle



{quote}

I don't understand why I have this error. Can you help me?
Thank you.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS] Lucene-Solr-master-Linux (32bit/jdk1.8.0_162) - Build # 21748 - Failure!

2018-04-03 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/21748/
Java: 32bit/jdk1.8.0_162 -client -XX:+UseParallelGC

1 tests failed.
FAILED:  org.apache.solr.cloud.DeleteReplicaTest.deleteReplicaOnIndexing

Error Message:
Captured an uncaught exception in thread: Thread[id=25279, 
name=updateExecutor-10410-thread-25, state=RUNNABLE, 
group=TGRP-DeleteReplicaTest]

Stack Trace:
com.carrotsearch.randomizedtesting.UncaughtExceptionError: Captured an uncaught 
exception in thread: Thread[id=25279, name=updateExecutor-10410-thread-25, 
state=RUNNABLE, group=TGRP-DeleteReplicaTest]
at 
__randomizedtesting.SeedInfo.seed([738FAC993508FCE1:AF48016969536C2]:0)
Caused by: org.apache.solr.common.SolrException: Replica: 
http://127.0.0.1:34097/solr/deleteReplicaOnIndexing_shard1_replica_n2/ should 
have been marked under leader initiated recovery in ZkController but wasn't.
at __randomizedtesting.SeedInfo.seed([738FAC993508FCE1]:0)
at 
org.apache.solr.cloud.LeaderInitiatedRecoveryThread.run(LeaderInitiatedRecoveryThread.java:90)
at 
com.codahale.metrics.InstrumentedExecutorService$InstrumentedRunnable.run(InstrumentedExecutorService.java:176)
at 
org.apache.solr.common.util.ExecutorUtil$MDCAwareThreadPoolExecutor.lambda$execute$0(ExecutorUtil.java:192)
at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)




Build Log:
[...truncated 1844 lines...]
   [junit4] JVM J1: stdout was not empty, see: 
/home/jenkins/workspace/Lucene-Solr-master-Linux/lucene/build/core/test/temp/junit4-J1-20180403_143117_446125282933538772840.sysout
   [junit4] >>> JVM J1 emitted unexpected output (verbatim) 
   [junit4] codec: DummyCompressingStoredFields, pf: 
LuceneVarGapDocFreqInterval, dvf: Memory
   [junit4] <<< JVM J1: EOF 

[...truncated 12110 lines...]
   [junit4] Suite: org.apache.solr.cloud.DeleteReplicaTest
   [junit4]   2> Creating dataDir: 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.DeleteReplicaTest_738FAC993508FCE1-001/init-core-data-001
   [junit4]   2> 1655693 INFO  
(SUITE-DeleteReplicaTest-seed#[738FAC993508FCE1]-worker) [] 
o.a.s.c.MiniSolrCloudCluster Starting cluster of 4 servers in 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.DeleteReplicaTest_738FAC993508FCE1-001/tempDir-001
   [junit4]   2> 1655694 INFO  
(SUITE-DeleteReplicaTest-seed#[738FAC993508FCE1]-worker) [] 
o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
   [junit4]   2> 1655694 INFO  (Thread-4157) [] o.a.s.c.ZkTestServer client 
port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 1655694 INFO  (Thread-4157) [] o.a.s.c.ZkTestServer 
Starting server
   [junit4]   2> 1655695 ERROR (Thread-4157) [] o.a.z.s.ZooKeeperServer 
ZKShutdownHandler is not registered, so ZooKeeper server won't take any action 
on ERROR or SHUTDOWN server state changes
   [junit4]   2> 1655794 INFO  
(SUITE-DeleteReplicaTest-seed#[738FAC993508FCE1]-worker) [] 
o.a.s.c.ZkTestServer start zk server on port:42011
   [junit4]   2> 1655797 INFO  (zkConnectionManagerCallback-10340-thread-1) [   
 ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1655800 INFO  (jetty-launcher-10337-thread-1) [] 
o.e.j.s.Server jetty-9.4.8.v20171121, build timestamp: 
2017-11-21T11:27:37-10:00, git hash: 82b8fb23f757335bb3329d540ce37a2a2615f0a8
   [junit4]   2> 1655800 INFO  (jetty-launcher-10337-thread-2) [] 
o.e.j.s.Server jetty-9.4.8.v20171121, build timestamp: 
2017-11-21T11:27:37-10:00, git hash: 82b8fb23f757335bb3329d540ce37a2a2615f0a8
   [junit4]   2> 1655800 INFO  (jetty-launcher-10337-thread-3) [] 
o.e.j.s.Server jetty-9.4.8.v20171121, build timestamp: 
2017-11-21T11:27:37-10:00, git hash: 82b8fb23f757335bb3329d540ce37a2a2615f0a8
   [junit4]   2> 1655801 INFO  (jetty-launcher-10337-thread-4) [] 
o.e.j.s.Server jetty-9.4.8.v20171121, build timestamp: 
2017-11-21T11:27:37-10:00, git hash: 82b8fb23f757335bb3329d540ce37a2a2615f0a8
   [junit4]   2> 1655801 INFO  (jetty-launcher-10337-thread-1) [] 
o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 1655801 INFO  (jetty-launcher-10337-thread-1) [] 
o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 1655801 INFO  (jetty-launcher-10337-thread-1) [] 
o.e.j.s.session Scavenging every 60ms
   [junit4]   2> 1655801 INFO  (jetty-launcher-10337-thread-3) [] 
o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 1655801 INFO  (jetty-launcher-10337-thread-3) [] 
o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 1655801 INFO  (jetty-launcher-10337-thread-3) [] 
o.e.j.s.session Scavenging every 60ms
   [junit4]   2> 1655801 INFO  (jetty-launcher-10337-thread-1) [] 
o.e.j.s.h.Cont

Re: Unsubscribe mails

2018-04-03 Thread Erick Erickson
Please follow the instructions here:
http://lucene.apache.org/solr/community.html#mailing-lists-irc. You
must use the _exact_ same e-mail as you used to subscribe.

If the initial try doesn't work and following the suggestions at the
"problems" link doesn't work for you, let us know. But note you need
to show us the _entire_ return header to allow anyone to diagnose the
problem.

Best,
Erick

On Tue, Apr 3, 2018 at 8:02 AM, Sarthak Sugandhi
 wrote:
> Hi Team
>
> I don't want to receive dev@lucene.apache.org mails.
> Please unsubscribe me.
>
> Thanks.
> Sarthak

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[GitHub] lucene-solr issue #313: SOLR-11924: Added a way to create collection set wat...

2018-04-03 Thread HoustonPutman
Github user HoustonPutman commented on the issue:

https://github.com/apache/lucene-solr/pull/313
  
Changed the structure to follow the `LiveNodesListener` convention. 

This includes passing the previous set of collections as well as the new 
list of collections in each notification.


---

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-12095) AutoScalingHandler should validate triggers before updating zookeeper

2018-04-03 Thread ASF subversion and git services (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12095?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424162#comment-16424162
 ] 

ASF subversion and git services commented on SOLR-12095:


Commit a8b42300c0698b9465f5c1c3565e417d73d9e2ac in lucene-solr's branch 
refs/heads/master from [~ab]
[ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=a8b4230 ]

SOLR-12095: Fix precommit issues.


> AutoScalingHandler should validate triggers before updating zookeeper
> -
>
> Key: SOLR-12095
> URL: https://issues.apache.org/jira/browse/SOLR-12095
> Project: Solr
>  Issue Type: Sub-task
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: AutoScaling, SolrCloud
>Reporter: Shalin Shekhar Mangar
>Assignee: Andrzej Bialecki 
>Priority: Major
> Fix For: 7.4, master (8.0)
>
>
> We validate policy and preferences before updating the configuration in 
> Zookeeper but we don't do that today for triggers. So users can put wrong or 
> unknown parameters and there won't be any complaints from the API, but at 
> runtime exceptions will be thrown/logged.
> We should change the trigger API to have a validation step. The catch here is 
> that it may require us to instantiate the trigger class.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-9241) Rebalance API for SolrCloud

2018-04-03 Thread JIRA

[ 
https://issues.apache.org/jira/browse/SOLR-9241?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424149#comment-16424149
 ] 

Jan Høydahl commented on SOLR-9241:
---

This patch seems to have a lot of overlap with the new autoscaling feature in 
Solr. Are there any plans to align this effort with what's in Solr 7?

> Rebalance API for SolrCloud
> ---
>
> Key: SOLR-9241
> URL: https://issues.apache.org/jira/browse/SOLR-9241
> Project: Solr
>  Issue Type: New Feature
>  Components: SolrCloud
>Affects Versions: 6.1
> Environment: Ubuntu, Mac OsX
>Reporter: Nitin Sharma
>Priority: Major
>  Labels: Cluster, SolrCloud
> Fix For: 6.1
>
> Attachments: Redistribute_After.jpeg, Redistribute_Before.jpeg, 
> Redistribute_call.jpeg, Replace_After.jpeg, Replace_Before.jpeg, 
> Replace_Call.jpeg, SOLR-9241-4.6.patch, SOLR-9241-6.1.patch
>
>   Original Estimate: 2,016h
>  Remaining Estimate: 2,016h
>
> This is the v1 of the patch for Solrcloud Rebalance api (as described in 
> http://engineering.bloomreach.com/solrcloud-rebalance-api/) , built at 
> Bloomreach by Nitin Sharma and Suruchi Shah. The goal of the API  is to 
> provide a zero downtime mechanism to perform data manipulation and  efficient 
> core allocation in solrcloud. This API was envisioned to be the base layer 
> that enables Solrcloud to be an auto scaling platform. (and work in unison 
> with other complementing monitoring and scaling features).
> Patch Status:
> ===
> The patch is work in progress and incremental. We have done a few rounds of 
> code clean up. We wanted to get the patch going first to get initial feed 
> back.  We will continue to work on making it more open source friendly and 
> easily testable.
>  Deployment Status:
> 
> The platform is deployed in production at bloomreach and has been battle 
> tested for large scale load. (millions of documents and hundreds of 
> collections).
>  Internals:
> =
> The internals of the API and performance : 
> http://engineering.bloomreach.com/solrcloud-rebalance-api/
> It is built on top of the admin collections API as an action (with various 
> flavors). At a high level, the rebalance api provides 2 constructs:
> Scaling Strategy:  Decides how to move the data.  Every flavor has multiple 
> options which can be reviewed in the api spec.
> Re-distribute  - Move around data in the cluster based on capacity/allocation.
> Auto Shard  - Dynamically shard a collection to any size.
> Smart Merge - Distributed Mode - Helps merging data from a larger shard setup 
> into smaller one.  (the source should be divisible by destination)
> Scale up -  Add replicas on the fly
> Scale Down - Remove replicas on the fly
> Allocation Strategy:  Decides where to put the data.  (Nodes with least 
> cores, Nodes that do not have this collection etc). Custom implementations 
> can be built on top as well. One other example is Availability Zone aware. 
> Distribute data such that every replica is placed on different availability 
> zone to support HA.
>  Detailed API Spec:
> 
>   https://github.com/bloomreach/solrcloud-rebalance-api
>  Contributors:
> =
>   Nitin Sharma
>   Suruchi Shah
>  Questions/Comments:
> =
>   You can reach me at nitin...@gmail.com



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



Unsubscribe mails

2018-04-03 Thread Sarthak Sugandhi
Hi Team

I don't want to receive dev@lucene.apache.org mails.
Please unsubscribe me.

Thanks.
Sarthak


[jira] [Commented] (LUCENE-7321) Character Mapping

2018-04-03 Thread Alexey Ponomarenko (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-7321?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424132#comment-16424132
 ] 

Alexey Ponomarenko commented on LUCENE-7321:


Hi, is there any plan to integrate it into Lucene/Solr? 

> Character Mapping
> -
>
> Key: LUCENE-7321
> URL: https://issues.apache.org/jira/browse/LUCENE-7321
> Project: Lucene - Core
>  Issue Type: New Feature
>  Components: modules/analysis
>Affects Versions: 4.6.1, 5.4.1, 6.0, 6.0.1
>Reporter: Ivan Provalov
>Priority: Minor
>  Labels: patch
> Fix For: 6.0.1
>
> Attachments: CharacterMappingComponent.pdf, LUCENE-7321.patch
>
>
> One of the challenges in search is recall of an item with a common typing 
> variant.  These cases can be as simple as lower/upper case in most languages, 
> accented characters, or more complex morphological phenomena like prefix 
> omitting, or constructing a character with some combining mark.  This 
> component addresses the cases, which are not covered by ASCII folding 
> component, or more complex to design with other tools.  The idea is that a 
> linguist could provide the mappings in a tab-delimited file, which then can 
> be directly used by Solr.
> The mappings are maintained in the tab-delimited file, which could be just a 
> copy paste from Excel spreadsheet.  This gives the linguists the opportunity 
> to create the mappings, then for the developer to include them in Solr 
> configuration.  There are a few cases, when the mappings grow complex, where 
> some additional debugging may be required.  The mappings can contain any 
> sequence of characters to any other sequence of characters.
> Some of the cases I discuss in detail document are handling the voiced vowels 
> for Japanese; common typing substitutions for Korean, Russian, Polish; 
> transliteration for Polish, Arabic; prefix removal for Arabic; suffix folding 
> for Japanese.  In the appendix, I give an example of implementing a Russian 
> light weight stemmer using this component.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS] Lucene-Solr-7.x-Windows (64bit/jdk-10) - Build # 528 - Unstable!

2018-04-03 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Windows/528/
Java: 64bit/jdk-10 -XX:+UseCompressedOops -XX:+UseSerialGC

7 tests failed.
FAILED:  org.apache.solr.handler.TestReplicationHandler.doTestStressReplication

Error Message:
found:2[index.20180403164101550, index.20180403164126127, index.properties, 
replication.properties, snapshot_metadata]

Stack Trace:
java.lang.AssertionError: found:2[index.20180403164101550, 
index.20180403164126127, index.properties, replication.properties, 
snapshot_metadata]
at 
__randomizedtesting.SeedInfo.seed([523EDD6B7C7E9C1E:8995DDAD7956F5AD]:0)
at org.junit.Assert.fail(Assert.java:93)
at org.junit.Assert.assertTrue(Assert.java:43)
at 
org.apache.solr.handler.TestReplicationHandler.checkForSingleIndex(TestReplicationHandler.java:963)
at 
org.apache.solr.handler.TestReplicationHandler.checkForSingleIndex(TestReplicationHandler.java:934)
at 
org.apache.solr.handler.TestReplicationHandler.doTestStressReplication(TestReplicationHandler.java:910)
at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:564)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$Statemen

[JENKINS] Lucene-Solr-repro - Build # 419 - Unstable

2018-04-03 Thread Apache Jenkins Server
Error processing tokens: Error while parsing action 
'Text/ZeroOrMore/FirstOf/Token/DelimitedToken/DelimitedToken_Action3' at input 
position (line 23, pos 4):
)"}
   ^

java.lang.OutOfMemoryError: Java heap space

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org

[jira] [Issue Comment Deleted] (SOLR-12163) Ref Guide: Improve Setting Up an External ZK Ensemble page

2018-04-03 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-12163?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-12163:
-
Comment: was deleted

(was: bq. maybe we just tell windows users to load those variables to 
bin/zkCli.cmd ?

You lost me there - tell them to modify zkCli.cmd? or provide the env file as a 
parameter?)

> Ref Guide: Improve Setting Up an External ZK Ensemble page
> --
>
> Key: SOLR-12163
> URL: https://issues.apache.org/jira/browse/SOLR-12163
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: documentation
>Reporter: Cassandra Targett
>Assignee: Cassandra Targett
>Priority: Major
> Fix For: 7.4
>
> Attachments: setting-up-an-external-zookeeper-ensemble.adoc
>
>
> I had to set up a ZK ensemble the other day for the first time in a while, 
> and thought I'd test our docs on the subject while I was at it. I headed over 
> to 
> https://lucene.apache.org/solr/guide/setting-up-an-external-zookeeper-ensemble.html,
>  and...Well, I still haven't gotten back to what I was trying to do, but I 
> rewrote the entire page.
> The problem to me is that the page today is mostly a stripped down copy of 
> the ZK Getting Started docs: walking through setting up a single ZK instance 
> before introducing the idea of an ensemble and going back through the same 
> configs again to update them for the ensemble.
> IOW, despite the page being titled "setting up an ensemble", it's mostly 
> about not setting up an ensemble. That's at the end of the page, which itself 
> focuses a bit heavily on the use case of running an ensemble on a single 
> server (so, if you're counting...that's 3 use cases we don't want people to 
> use discussed in detail on a page that's supposedly about _not_ doing any of 
> those things).
> So, I took all of it and restructured the whole thing to focus primarily on 
> the use case we want people to use: running 3 ZK nodes on different machines. 
> Running 3 on one machine is still there, but noted in passing with the 
> appropriate caveats. I've also added information about choosing to use a 
> chroot, which AFAICT was only covered in the section on Taking Solr to 
> Production.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Comment Edited] (SOLR-12163) Ref Guide: Improve Setting Up an External ZK Ensemble page

2018-04-03 Thread Cassandra Targett (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12163?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424087#comment-16424087
 ] 

Cassandra Targett edited comment on SOLR-12163 at 4/3/18 2:20 PM:
--

bq. maybe we just tell windows users to load those variables to bin/zkCli.cmd ?

You lost me there - tell Windows users to modify zkCli.cmd (how?)? or provide 
the env file as a parameter?


was (Author: ctargett):
bq. maybe we just tell windows users to load those variables to bin/zkCli.cmd ?

You lost me there - tell them to modify zkCli.cmd? or provide the env file as a 
parameter?

> Ref Guide: Improve Setting Up an External ZK Ensemble page
> --
>
> Key: SOLR-12163
> URL: https://issues.apache.org/jira/browse/SOLR-12163
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: documentation
>Reporter: Cassandra Targett
>Assignee: Cassandra Targett
>Priority: Major
> Fix For: 7.4
>
> Attachments: setting-up-an-external-zookeeper-ensemble.adoc
>
>
> I had to set up a ZK ensemble the other day for the first time in a while, 
> and thought I'd test our docs on the subject while I was at it. I headed over 
> to 
> https://lucene.apache.org/solr/guide/setting-up-an-external-zookeeper-ensemble.html,
>  and...Well, I still haven't gotten back to what I was trying to do, but I 
> rewrote the entire page.
> The problem to me is that the page today is mostly a stripped down copy of 
> the ZK Getting Started docs: walking through setting up a single ZK instance 
> before introducing the idea of an ensemble and going back through the same 
> configs again to update them for the ensemble.
> IOW, despite the page being titled "setting up an ensemble", it's mostly 
> about not setting up an ensemble. That's at the end of the page, which itself 
> focuses a bit heavily on the use case of running an ensemble on a single 
> server (so, if you're counting...that's 3 use cases we don't want people to 
> use discussed in detail on a page that's supposedly about _not_ doing any of 
> those things).
> So, I took all of it and restructured the whole thing to focus primarily on 
> the use case we want people to use: running 3 ZK nodes on different machines. 
> Running 3 on one machine is still there, but noted in passing with the 
> appropriate caveats. I've also added information about choosing to use a 
> chroot, which AFAICT was only covered in the section on Taking Solr to 
> Production.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-12163) Ref Guide: Improve Setting Up an External ZK Ensemble page

2018-04-03 Thread Cassandra Targett (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12163?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424086#comment-16424086
 ] 

Cassandra Targett commented on SOLR-12163:
--

bq. maybe we just tell windows users to load those variables to bin/zkCli.cmd ?

You lost me there - tell them to modify zkCli.cmd? or provide the env file as a 
parameter?

> Ref Guide: Improve Setting Up an External ZK Ensemble page
> --
>
> Key: SOLR-12163
> URL: https://issues.apache.org/jira/browse/SOLR-12163
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: documentation
>Reporter: Cassandra Targett
>Assignee: Cassandra Targett
>Priority: Major
> Fix For: 7.4
>
> Attachments: setting-up-an-external-zookeeper-ensemble.adoc
>
>
> I had to set up a ZK ensemble the other day for the first time in a while, 
> and thought I'd test our docs on the subject while I was at it. I headed over 
> to 
> https://lucene.apache.org/solr/guide/setting-up-an-external-zookeeper-ensemble.html,
>  and...Well, I still haven't gotten back to what I was trying to do, but I 
> rewrote the entire page.
> The problem to me is that the page today is mostly a stripped down copy of 
> the ZK Getting Started docs: walking through setting up a single ZK instance 
> before introducing the idea of an ensemble and going back through the same 
> configs again to update them for the ensemble.
> IOW, despite the page being titled "setting up an ensemble", it's mostly 
> about not setting up an ensemble. That's at the end of the page, which itself 
> focuses a bit heavily on the use case of running an ensemble on a single 
> server (so, if you're counting...that's 3 use cases we don't want people to 
> use discussed in detail on a page that's supposedly about _not_ doing any of 
> those things).
> So, I took all of it and restructured the whole thing to focus primarily on 
> the use case we want people to use: running 3 ZK nodes on different machines. 
> Running 3 on one machine is still there, but noted in passing with the 
> appropriate caveats. I've also added information about choosing to use a 
> chroot, which AFAICT was only covered in the section on Taking Solr to 
> Production.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-12163) Ref Guide: Improve Setting Up an External ZK Ensemble page

2018-04-03 Thread Cassandra Targett (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12163?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424087#comment-16424087
 ] 

Cassandra Targett commented on SOLR-12163:
--

bq. maybe we just tell windows users to load those variables to bin/zkCli.cmd ?

You lost me there - tell them to modify zkCli.cmd? or provide the env file as a 
parameter?

> Ref Guide: Improve Setting Up an External ZK Ensemble page
> --
>
> Key: SOLR-12163
> URL: https://issues.apache.org/jira/browse/SOLR-12163
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: documentation
>Reporter: Cassandra Targett
>Assignee: Cassandra Targett
>Priority: Major
> Fix For: 7.4
>
> Attachments: setting-up-an-external-zookeeper-ensemble.adoc
>
>
> I had to set up a ZK ensemble the other day for the first time in a while, 
> and thought I'd test our docs on the subject while I was at it. I headed over 
> to 
> https://lucene.apache.org/solr/guide/setting-up-an-external-zookeeper-ensemble.html,
>  and...Well, I still haven't gotten back to what I was trying to do, but I 
> rewrote the entire page.
> The problem to me is that the page today is mostly a stripped down copy of 
> the ZK Getting Started docs: walking through setting up a single ZK instance 
> before introducing the idea of an ensemble and going back through the same 
> configs again to update them for the ensemble.
> IOW, despite the page being titled "setting up an ensemble", it's mostly 
> about not setting up an ensemble. That's at the end of the page, which itself 
> focuses a bit heavily on the use case of running an ensemble on a single 
> server (so, if you're counting...that's 3 use cases we don't want people to 
> use discussed in detail on a page that's supposedly about _not_ doing any of 
> those things).
> So, I took all of it and restructured the whole thing to focus primarily on 
> the use case we want people to use: running 3 ZK nodes on different machines. 
> Running 3 on one machine is still there, but noted in passing with the 
> appropriate caveats. I've also added information about choosing to use a 
> chroot, which AFAICT was only covered in the section on Taking Solr to 
> Production.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (LUCENE-8226) Don't use MemoryCodec for nightly runs of TestIndexSorting

2018-04-03 Thread Alan Woodward (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-8226?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424079#comment-16424079
 ] 

Alan Woodward commented on LUCENE-8226:
---

Another nightly failure, this time due to test timeouts: 
https://builds.apache.org/job/Lucene-Solr-NightlyTests-7.x/190/

No memory codecs in here, just lots and lots of docs slowing everything down - 
possibly with lots of merges as well?  It looks as though it's still setting 
the test up when it times out.

> Don't use MemoryCodec for nightly runs of TestIndexSorting
> --
>
> Key: LUCENE-8226
> URL: https://issues.apache.org/jira/browse/LUCENE-8226
> Project: Lucene - Core
>  Issue Type: Task
>Reporter: Alan Woodward
>Priority: Major
> Attachments: LUCENE-8226.patch, LUCENE-8226.patch
>
>
> Nightly runs of TestIndexSorting fail occasionally with OOM (see 
> [https://builds.apache.org/job/Lucene-Solr-NightlyTests-7.x/183/] for a 
> recent example, and it's been appearing in Erick's BadApple report too).  It 
> looks as this is normally due to the combination of a large docset and 
> MemoryCodec.  We should suppress MemoryCodec for these tests (on nightly runs 
> only, if possible).



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



Re: VOTE: Apache Solr Reference Guide for Solr 7.3 RC1

2018-04-03 Thread Mikhail Khludnev
I've looked through recent changes in PDF. It seems good.

On Tue, Apr 3, 2018 at 4:32 PM, Cassandra Targett 
wrote:

> Reminder about this.
>
> It looks like the Lucene/Solr release vote is going to pass, so we could
> have both released at about the same time.
>
> Thanks,
> Cassandra
>
> On Thu, Mar 29, 2018 at 10:49 AM, Cassandra Targett  > wrote:
>
>> Please vote to release the Apache Solr Reference Guide for Solr 7.3.
>>
>> The artifacts can be downloaded from:
>> https://dist.apache.org/repos/dist/dev/lucene/solr/ref-guide
>> /apache-solr-ref-guide-7.3-RC1/
>>
>> $ cat apache-solr-ref-guide-7.3.pdf.sha1
>> 151f06d920d1ac41564f3c0ddabae3c2c36b6892  apache-solr-ref-guide-7.3.pdf
>>
>> The HTML version has also been uploaded to the website:
>> https://lucene.apache.org/solr/guide/7_3/
>>
>> Here's my +1.
>>
>> If it happens that this vote passes before the vote for the final
>> Lucene/Solr RC is complete, I'll hold release/announcement of the Ref Guide
>> until the vote is complete and the release steps are finished.
>>
>> Thanks,
>> Cassandra
>>
>
>


-- 
Sincerely yours
Mikhail Khludnev


[jira] [Commented] (SOLR-12177) Zorlani search engine

2018-04-03 Thread Steve Rowe (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-12177?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16424070#comment-16424070
 ] 

Steve Rowe commented on SOLR-12177:
---

I plan on deleting this issue later today if no more details are posted here.  
(At this point this looks to me like an advertisement, not a "new feature".)

> Zorlani search engine
> -
>
> Key: SOLR-12177
> URL: https://issues.apache.org/jira/browse/SOLR-12177
> Project: Solr
>  Issue Type: New Feature
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: config-api
>Affects Versions: 7.2
> Environment: search engine
>Reporter: fabrice
>Priority: Major
>  Labels: security
> Fix For: 5.5.6
>
>
> Search engine



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS] Lucene-Solr-NightlyTests-7.x - Build # 190 - Still unstable

2018-04-03 Thread Apache Jenkins Server
Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-7.x/190/

2 tests failed.
FAILED:  org.apache.lucene.index.TestIndexSorting.testRandom3

Error Message:
Test abandoned because suite timeout was reached.

Stack Trace:
java.lang.Exception: Test abandoned because suite timeout was reached.
at __randomizedtesting.SeedInfo.seed([AA3475BD7E8C96D9]:0)


FAILED:  junit.framework.TestSuite.org.apache.lucene.index.TestIndexSorting

Error Message:
Suite timeout exceeded (>= 720 msec).

Stack Trace:
java.lang.Exception: Suite timeout exceeded (>= 720 msec).
at __randomizedtesting.SeedInfo.seed([AA3475BD7E8C96D9]:0)




Build Log:
[...truncated 1630 lines...]
   [junit4] JVM J2: stdout was not empty, see: 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/lucene/build/core/test/temp/junit4-J2-20180403_085635_1795743216079533742227.sysout
   [junit4] >>> JVM J2 emitted unexpected output (verbatim) 
   [junit4] codec: CheapBastard, pf: Lucene50, dvf: Direct
   [junit4] <<< JVM J2: EOF 

[...truncated 53 lines...]
   [junit4] Suite: org.apache.lucene.index.TestIndexSorting
   [junit4]   2> Aib 03, 2018 5:56:37 P.M. 
com.carrotsearch.randomizedtesting.ThreadLeakControl$2 evaluate
   [junit4]   2> WARNING: Suite execution timed out: 
org.apache.lucene.index.TestIndexSorting
   [junit4]   2>1) Thread[id=16, 
name=TEST-TestIndexSorting.testRandom3-seed#[AA3475BD7E8C96D9], state=RUNNABLE, 
group=TGRP-TestIndexSorting]
   [junit4]   2> at sun.nio.ch.NativeThread.current(Native Method)
   [junit4]   2> at 
sun.nio.ch.NativeThreadSet.add(NativeThreadSet.java:46)
   [junit4]   2> at 
sun.nio.ch.FileChannelImpl.readInternal(FileChannelImpl.java:737)
   [junit4]   2> at 
sun.nio.ch.FileChannelImpl.read(FileChannelImpl.java:727)
   [junit4]   2> at 
org.apache.lucene.mockfile.FilterFileChannel.read(FilterFileChannel.java:111)
   [junit4]   2> at 
org.apache.lucene.mockfile.FilterFileChannel.read(FilterFileChannel.java:111)
   [junit4]   2> at 
org.apache.lucene.mockfile.FilterFileChannel.read(FilterFileChannel.java:111)
   [junit4]   2> at 
org.apache.lucene.store.NIOFSDirectory$NIOFSIndexInput.readInternal(NIOFSDirectory.java:179)
   [junit4]   2> at 
org.apache.lucene.store.BufferedIndexInput.refill(BufferedIndexInput.java:342)
   [junit4]   2> at 
org.apache.lucene.store.BufferedIndexInput.readByte(BufferedIndexInput.java:269)
   [junit4]   2> at 
org.apache.lucene.util.packed.DirectReader$DirectPackedReader8.get(DirectReader.java:145)
   [junit4]   2> at 
org.apache.lucene.codecs.lucene70.Lucene70DocValuesProducer$24.nextOrd(Lucene70DocValuesProducer.java:1406)
   [junit4]   2> at 
org.apache.lucene.index.AssertingLeafReader$AssertingSortedSetDocValues.nextOrd(AssertingLeafReader.java:841)
   [junit4]   2> at 
org.apache.lucene.codecs.DocValuesConsumer$5$1.nextOrd(DocValuesConsumer.java:761)
   [junit4]   2> at 
org.apache.lucene.codecs.lucene70.Lucene70DocValuesConsumer.addSortedSetField(Lucene70DocValuesConsumer.java:606)
   [junit4]   2> at 
org.apache.lucene.codecs.asserting.AssertingDocValuesFormat$AssertingDocValuesConsumer.addSortedSetField(AssertingDocValuesFormat.java:209)
   [junit4]   2> at 
org.apache.lucene.codecs.DocValuesConsumer.mergeSortedSetField(DocValuesConsumer.java:695)
   [junit4]   2> at 
org.apache.lucene.codecs.DocValuesConsumer.merge(DocValuesConsumer.java:141)
   [junit4]   2> at 
org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat$FieldsWriter.merge(PerFieldDocValuesFormat.java:151)
   [junit4]   2> at 
org.apache.lucene.index.SegmentMerger.mergeDocValues(SegmentMerger.java:181)
   [junit4]   2> at 
org.apache.lucene.index.SegmentMerger.merge(SegmentMerger.java:125)
   [junit4]   2> at 
org.apache.lucene.index.IndexWriter.mergeMiddle(IndexWriter.java:4479)
   [junit4]   2> at 
org.apache.lucene.index.IndexWriter.merge(IndexWriter.java:4140)
   [junit4]   2> at 
org.apache.lucene.index.SerialMergeScheduler.merge(SerialMergeScheduler.java:40)
   [junit4]   2> at 
org.apache.lucene.index.IndexWriter.maybeMerge(IndexWriter.java:2334)
   [junit4]   2> at 
org.apache.lucene.index.IndexWriter.processEvents(IndexWriter.java:5146)
   [junit4]   2> at 
org.apache.lucene.index.IndexWriter.updateDocument(IndexWriter.java:1778)
   [junit4]   2> at 
org.apache.lucene.index.IndexWriter.addDocument(IndexWriter.java:1467)
   [junit4]   2> at 
org.apache.lucene.index.TestIndexSorting.testRandom3(TestIndexSorting.java:2299)
   [junit4]   2> at sun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)
   [junit4]   2> at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
   [junit4]   2> at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccesso

  1   2   >