[JENKINS] Lucene-Solr-7.x-MacOSX (64bit/jdk-9) - Build # 393 - Still Unstable!

2018-01-10 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-MacOSX/393/
Java: 64bit/jdk-9 -XX:-UseCompressedOops -XX:+UseParallelGC

9 tests failed.
FAILED:  
org.apache.lucene.search.uhighlight.TestUnifiedHighlighterStrictPhrases.testWithSameTermQuery
 {p0=stored,indexed,tokenized}

Error Message:
arrays first differed at element [0]; 
expected:<...,yin[10-13]}score=2.[496421]3]> but 
was:<...,yin[10-13]}score=2.[068500]3]>

Stack Trace:
arrays first differed at element [0]; 
expected:<...,yin[10-13]}score=2.[496421]3]> but 
was:<...,yin[10-13]}score=2.[068500]3]>
at 
__randomizedtesting.SeedInfo.seed([F4F3414FDADD2608:3E880C9FF5516FF7]:0)
at 
org.junit.internal.ComparisonCriteria.arrayEquals(ComparisonCriteria.java:52)
at org.junit.Assert.internalArrayEquals(Assert.java:416)
at org.junit.Assert.assertArrayEquals(Assert.java:168)
at org.junit.Assert.assertArrayEquals(Assert.java:185)
at 
org.apache.lucene.search.uhighlight.TestUnifiedHighlighterStrictPhrases.testWithSameTermQuery(TestUnifiedHighlighterStrictPhrases.java:165)
at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:564)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.base/java.lang.Thread.run(Thread.java:844)


FAILED:  
org.apache.lucene.search.uhighlight.TestUnifiedHighlighterStrictPhrases.testSubPhrases
 {p0=stored,indexed,tokenized}

Error Message:
arrays first differed at element [0]; 

[jira] [Updated] (SOLR-11795) Add Solr metrics exporter for Prometheus

2018-01-10 Thread Minoru Osuka (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-11795?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Minoru Osuka updated SOLR-11795:

Attachment: SOLR-11795-3.patch

Attach new patch file.

> Add Solr metrics exporter for Prometheus
> 
>
> Key: SOLR-11795
> URL: https://issues.apache.org/jira/browse/SOLR-11795
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: metrics
>Affects Versions: 7.2
>Reporter: Minoru Osuka
>Assignee: Koji Sekiguchi
>Priority: Minor
> Attachments: SOLR-11795-2.patch, SOLR-11795-3.patch, 
> SOLR-11795.patch, solr-dashboard.png, solr-exporter-diagram.png
>
>
> I 'd like to monitor Solr using Prometheus and Grafana.
> I've already created Solr metrics exporter for Prometheus. I'd like to 
> contribute to contrib directory if you don't mind.
> !solr-exporter-diagram.png|thumbnail!
> !solr-dashboard.png|thumbnail!



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS] Lucene-Solr-SmokeRelease-7.x - Build # 114 - Still Failing

2018-01-10 Thread Apache Jenkins Server
Build: https://builds.apache.org/job/Lucene-Solr-SmokeRelease-7.x/114/

No tests ran.

Build Log:
[...truncated 28286 lines...]
prepare-release-no-sign:
[mkdir] Created dir: 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/dist
 [copy] Copying 491 files to 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/dist/lucene
 [copy] Copying 215 files to 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/dist/solr
   [smoker] Java 1.8 JAVA_HOME=/home/jenkins/tools/java/latest1.8
   [smoker] NOTE: output encoding is UTF-8
   [smoker] 
   [smoker] Load release URL 
"file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/dist/"...
   [smoker] 
   [smoker] Test Lucene...
   [smoker]   test basics...
   [smoker]   get KEYS
   [smoker] 0.2 MB in 0.07 sec (3.4 MB/sec)
   [smoker]   check changes HTML...
   [smoker]   download lucene-7.3.0-src.tgz...
   [smoker] 31.6 MB in 0.15 sec (215.0 MB/sec)
   [smoker] verify md5/sha1 digests
   [smoker]   download lucene-7.3.0.tgz...
   [smoker] 73.0 MB in 0.12 sec (615.5 MB/sec)
   [smoker] verify md5/sha1 digests
   [smoker]   download lucene-7.3.0.zip...
   [smoker] 83.5 MB in 0.24 sec (350.0 MB/sec)
   [smoker] verify md5/sha1 digests
   [smoker]   unpack lucene-7.3.0.tgz...
   [smoker] verify JAR metadata/identity/no javax.* or java.* classes...
   [smoker] test demo with 1.8...
   [smoker]   got 6282 hits for query "lucene"
   [smoker] checkindex with 1.8...
   [smoker] check Lucene's javadoc JAR
   [smoker]   unpack lucene-7.3.0.zip...
   [smoker] verify JAR metadata/identity/no javax.* or java.* classes...
   [smoker] test demo with 1.8...
   [smoker]   got 6282 hits for query "lucene"
   [smoker] checkindex with 1.8...
   [smoker] check Lucene's javadoc JAR
   [smoker]   unpack lucene-7.3.0-src.tgz...
   [smoker] make sure no JARs/WARs in src dist...
   [smoker] run "ant validate"
   [smoker] run tests w/ Java 8 and testArgs='-Dtests.slow=false'...
   [smoker] test demo with 1.8...
   [smoker]   got 215 hits for query "lucene"
   [smoker] checkindex with 1.8...
   [smoker] generate javadocs w/ Java 8...
   [smoker] 
   [smoker] Crawl/parse...
   [smoker] 
   [smoker] Verify...
   [smoker]   confirm all releases have coverage in TestBackwardsCompatibility
   [smoker] find all past Lucene releases...
   [smoker] run TestBackwardsCompatibility..
   [smoker] success!
   [smoker] 
   [smoker] Test Solr...
   [smoker]   test basics...
   [smoker]   get KEYS
   [smoker] 0.2 MB in 0.03 sec (9.1 MB/sec)
   [smoker]   check changes HTML...
   [smoker]   download solr-7.3.0-src.tgz...
   [smoker] 53.9 MB in 0.67 sec (80.0 MB/sec)
   [smoker] verify md5/sha1 digests
   [smoker]   download solr-7.3.0.tgz...
   [smoker] 150.2 MB in 1.92 sec (78.4 MB/sec)
   [smoker] verify md5/sha1 digests
   [smoker]   download solr-7.3.0.zip...
   [smoker] 151.2 MB in 1.09 sec (139.0 MB/sec)
   [smoker] verify md5/sha1 digests
   [smoker]   unpack solr-7.3.0.tgz...
   [smoker] verify JAR metadata/identity/no javax.* or java.* classes...
   [smoker] unpack lucene-7.3.0.tgz...
   [smoker]   **WARNING**: skipping check of 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/tmp/unpack/solr-7.3.0/contrib/dataimporthandler-extras/lib/javax.mail-1.5.1.jar:
 it has javax.* classes
   [smoker]   **WARNING**: skipping check of 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/tmp/unpack/solr-7.3.0/contrib/dataimporthandler-extras/lib/activation-1.1.1.jar:
 it has javax.* classes
   [smoker] copying unpacked distribution for Java 8 ...
   [smoker] test solr example w/ Java 8...
   [smoker]   start Solr instance 
(log=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/tmp/unpack/solr-7.3.0-java8/solr-example.log)...
   [smoker] No process found for Solr node running on port 8983
   [smoker]   Running techproducts example on port 8983 from 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/tmp/unpack/solr-7.3.0-java8
   [smoker] *** [WARN] *** Your open file limit is currently 6.  
   [smoker]  It should be set to 65000 to avoid operational impairment. 
   [smoker]  If you no longer wish to see this warning, set SOLR_ULIMIT_CHECKS 
to false in your profile or solr.in.sh
   [smoker] *** [WARN] ***  Your Max Processes Limit is currently 10240. 
   [smoker]  It should be set to 65000 to avoid operational impairment. 
   [smoker]  If you no longer wish to see this warning, set SOLR_ULIMIT_CHECKS 
to false in your profile or solr.in.sh
   [smoker] Creating Solr home directory 

[JENKINS-EA] Lucene-Solr-7.x-Linux (64bit/jdk-10-ea+37) - Build # 1162 - Failure!

2018-01-10 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Linux/1162/
Java: 64bit/jdk-10-ea+37 -XX:-UseCompressedOops -XX:+UseConcMarkSweepGC

9 tests failed.
FAILED:  
org.apache.lucene.search.uhighlight.TestUnifiedHighlighterStrictPhrases.testWithSameTermQuery
 {p0=stored,indexed,tokenized}

Error Message:
arrays first differed at element [0]; 
expected:<...,yin[10-13]}score=2.[496421]3]> but 
was:<...,yin[10-13]}score=2.[068500]3]>

Stack Trace:
arrays first differed at element [0]; 
expected:<...,yin[10-13]}score=2.[496421]3]> but 
was:<...,yin[10-13]}score=2.[068500]3]>
at 
__randomizedtesting.SeedInfo.seed([1F257E208B3AD702:D55E33F0A4B69EFD]:0)
at 
org.junit.internal.ComparisonCriteria.arrayEquals(ComparisonCriteria.java:52)
at org.junit.Assert.internalArrayEquals(Assert.java:416)
at org.junit.Assert.assertArrayEquals(Assert.java:168)
at org.junit.Assert.assertArrayEquals(Assert.java:185)
at 
org.apache.lucene.search.uhighlight.TestUnifiedHighlighterStrictPhrases.testWithSameTermQuery(TestUnifiedHighlighterStrictPhrases.java:165)
at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:564)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.base/java.lang.Thread.run(Thread.java:844)


FAILED:  
org.apache.lucene.search.uhighlight.TestUnifiedHighlighterStrictPhrases.testSubPhrases
 {p0=stored,indexed,tokenized}

Error Message:
arrays first differed at element [0]; 

[jira] [Commented] (LUCENE-8126) Spatial prefix tree based on S2 geometry

2018-01-10 Thread Ignacio Vera (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-8126?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321747#comment-16321747
 ] 

Ignacio Vera commented on LUCENE-8126:
--

Note that S2 geometry has 6-arity for the first level, after that divides every 
cell in 4 so it has in fact 4-arity.

[~daddywri] : I have added in the pull request a new Shape (GeoS2shape) which 
is a very fast implementation of a 4-point polygon. I do not perform any 
argument checking — is that ok? The purpose of the shape is speed. In addition I 
have implemented it as a polygon and added a method in the polygon factory, is 
that approach ok?







> Spatial prefix tree based on S2 geometry
> 
>
> Key: LUCENE-8126
> URL: https://issues.apache.org/jira/browse/LUCENE-8126
> Project: Lucene - Core
>  Issue Type: New Feature
>  Components: modules/spatial-extras
>Reporter: Ignacio Vera
>
> Hi [~dsmiley],
> I have been working on a prefix tree based on Google S2 geometry 
> (https://s2geometry.io/) to be used mainly with Geo3d shapes with very 
> promising results, in particular for complex shapes (e.g polygons). Using 
> this pixelization scheme reduces the size of the index, improves the 
> performance of the queries and reduces the loading time for non-point shapes. 
> If you are ok with this contribution and before providing any code I would 
> like to understand what is the correct/preferred approach:
> 1) Add a new dependency to the S2 library 
> (https://mvnrepository.com/artifact/io.sgr/s2-geometry-library-java). It has 
> Apache 2.0 license so it should be ok.
> 2) Create a utility class with all methods necessary to navigate the S2 tree 
> and create shapes from S2 cells (basically port what we need from the library 
> into Lucene).
> What do you think?



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS] Lucene-Solr-master-MacOSX (64bit/jdk1.8.0) - Build # 4379 - Still Unstable!

2018-01-10 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-MacOSX/4379/
Java: 64bit/jdk1.8.0 -XX:+UseCompressedOops -XX:+UseSerialGC

10 tests failed.
FAILED:  
org.apache.lucene.search.uhighlight.TestUnifiedHighlighterStrictPhrases.testSubPhrases
 
{p0=stored,indexed,tokenized,termVector,indexOptions=DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS}

Error Message:
arrays first differed at element [0]; 
expected:<...harlie[12-19]}score=[3.931102]]> but 
was:<...harlie[12-19]}score=[2.723861]]>

Stack Trace:
arrays first differed at element [0]; 
expected:<...harlie[12-19]}score=[3.931102]]> but 
was:<...harlie[12-19]}score=[2.723861]]>
at 
__randomizedtesting.SeedInfo.seed([B47B2A62567237AF:EE4821997BE73AF]:0)
at 
org.junit.internal.ComparisonCriteria.arrayEquals(ComparisonCriteria.java:52)
at org.junit.Assert.internalArrayEquals(Assert.java:416)
at org.junit.Assert.assertArrayEquals(Assert.java:168)
at org.junit.Assert.assertArrayEquals(Assert.java:185)
at 
org.apache.lucene.search.uhighlight.TestUnifiedHighlighterStrictPhrases.testSubPhrases(TestUnifiedHighlighterStrictPhrases.java:210)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.lang.Thread.run(Thread.java:748)


FAILED:  
org.apache.lucene.search.uhighlight.TestUnifiedHighlighterStrictPhrases.testWithSameTermQuery
 

[jira] [Commented] (LUCENE-8126) Spatial prefix tree based on S2 geometry

2018-01-10 Thread ASF GitHub Bot (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-8126?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321726#comment-16321726
 ] 

ASF GitHub Bot commented on LUCENE-8126:


Github user iverase commented on a diff in the pull request:

https://github.com/apache/lucene-solr/pull/302#discussion_r160868387
  
--- Diff: 
lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTreeCell.java
 ---
@@ -0,0 +1,285 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.spatial.prefix.tree;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import com.google.common.geometry.S2CellId;
+import org.apache.lucene.util.BytesRef;
+import org.locationtech.spatial4j.shape.Shape;
+import org.locationtech.spatial4j.shape.SpatialRelation;
+
+/**
+ * This class represents a S2 pixel in the RPT.
+ *
+ * @lucene.internal
+ */
+class S2PrefixTreeCell implements Cell {
+
+//Faces of S2 Geometry
+private static S2CellId[] FACES = new S2CellId[6];
+static {
+FACES[0] = S2CellId.fromFacePosLevel(0, 0, 0);
+FACES[1] = S2CellId.fromFacePosLevel(1, 0, 0);
+FACES[2] = S2CellId.fromFacePosLevel(2, 0, 0);
+FACES[3] = S2CellId.fromFacePosLevel(3, 0, 0);
+FACES[4] = S2CellId.fromFacePosLevel(4, 0, 0);
+FACES[5] = S2CellId.fromFacePosLevel(5, 0, 0);
+}
+
+/*Special character to define a cell leaf*/
+private static final byte LEAF = '+';
+
+/*Tokens are used to serialize cells*/
+private static final byte[] TOKENS;
+/*Map containing mapping between tokens and integer values*/
+private static final Map PIXELS;
+static {
+TOKENS = new byte[]{'0', '1', '2', '3', '4', '5'};
+PIXELS = new HashMap<>(6);
+PIXELS.put(TOKENS[0], 0);
+PIXELS.put(TOKENS[1], 1);
+PIXELS.put(TOKENS[2], 2);
+PIXELS.put(TOKENS[3], 3);
+PIXELS.put(TOKENS[4], 4);
+PIXELS.put(TOKENS[5], 5);
+}
+
+S2CellId cellId;
+int level; //cache level
+S2PrefixTree tree;
+
+SpatialRelation shapeRel= null;
+boolean isLeaf;
+Shape shape = null;
+
+S2PrefixTreeCell(S2PrefixTree tree, S2CellId cellId){
+this.cellId= cellId;
+this.tree = tree;
+setLevel();
+if (getLevel() == tree.getMaxLevels()) {
+setLeaf();
+}
+}
+
+void readCell(S2PrefixTree tree, BytesRef ref){
+isLeaf = false;
+shape = null;
+shapeRel = null;
+this.tree = tree;
+cellId = getS2CellIdFromBytesRef(ref);
+setLevel();
+if (isLeaf(ref) || getLevel() == tree.getMaxLevels()){
+setLeaf();
+}
+}
+
+@Override
+public SpatialRelation getShapeRel() {
+return shapeRel;
+}
+
+@Override
+public void setShapeRel(SpatialRelation rel) {
+shapeRel = rel;
+}
+
+@Override
+public boolean isLeaf() {
+return isLeaf;
+}
+
+@Override
+public void setLeaf() {
+isLeaf = true;
+}
+
+@Override
+public BytesRef getTokenBytesWithLeaf(BytesRef result) {
+result = getTokenBytesNoLeaf(result);
+//max levels do not have leaf
+if (isLeaf() && !(getLevel() == tree.getMaxLevels())){
+//Add leaf byte
+result.bytes[result.offset + result.length] = LEAF;
+result.length++;
+}
+return result;
+}
+
+@Override
+public BytesRef getTokenBytesNoLeaf(BytesRef result) {
+if (result == null){
+result = 

[GitHub] lucene-solr pull request #302: LUCENE-8126: Spatial prefix tree based on S2 ...

2018-01-10 Thread iverase
Github user iverase commented on a diff in the pull request:

https://github.com/apache/lucene-solr/pull/302#discussion_r160868387
  
--- Diff: 
lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTreeCell.java
 ---
@@ -0,0 +1,285 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.spatial.prefix.tree;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import com.google.common.geometry.S2CellId;
+import org.apache.lucene.util.BytesRef;
+import org.locationtech.spatial4j.shape.Shape;
+import org.locationtech.spatial4j.shape.SpatialRelation;
+
+/**
+ * This class represents a S2 pixel in the RPT.
+ *
+ * @lucene.internal
+ */
+class S2PrefixTreeCell implements Cell {
+
+//Faces of S2 Geometry
+private static S2CellId[] FACES = new S2CellId[6];
+static {
+FACES[0] = S2CellId.fromFacePosLevel(0, 0, 0);
+FACES[1] = S2CellId.fromFacePosLevel(1, 0, 0);
+FACES[2] = S2CellId.fromFacePosLevel(2, 0, 0);
+FACES[3] = S2CellId.fromFacePosLevel(3, 0, 0);
+FACES[4] = S2CellId.fromFacePosLevel(4, 0, 0);
+FACES[5] = S2CellId.fromFacePosLevel(5, 0, 0);
+}
+
+/*Special character to define a cell leaf*/
+private static final byte LEAF = '+';
+
+/*Tokens are used to serialize cells*/
+private static final byte[] TOKENS;
+/*Map containing mapping between tokens and integer values*/
+private static final Map PIXELS;
+static {
+TOKENS = new byte[]{'0', '1', '2', '3', '4', '5'};
+PIXELS = new HashMap<>(6);
+PIXELS.put(TOKENS[0], 0);
+PIXELS.put(TOKENS[1], 1);
+PIXELS.put(TOKENS[2], 2);
+PIXELS.put(TOKENS[3], 3);
+PIXELS.put(TOKENS[4], 4);
+PIXELS.put(TOKENS[5], 5);
+}
+
+S2CellId cellId;
+int level; //cache level
+S2PrefixTree tree;
+
+SpatialRelation shapeRel= null;
+boolean isLeaf;
+Shape shape = null;
+
+S2PrefixTreeCell(S2PrefixTree tree, S2CellId cellId){
+this.cellId= cellId;
+this.tree = tree;
+setLevel();
+if (getLevel() == tree.getMaxLevels()) {
+setLeaf();
+}
+}
+
+void readCell(S2PrefixTree tree, BytesRef ref){
+isLeaf = false;
+shape = null;
+shapeRel = null;
+this.tree = tree;
+cellId = getS2CellIdFromBytesRef(ref);
+setLevel();
+if (isLeaf(ref) || getLevel() == tree.getMaxLevels()){
+setLeaf();
+}
+}
+
+@Override
+public SpatialRelation getShapeRel() {
+return shapeRel;
+}
+
+@Override
+public void setShapeRel(SpatialRelation rel) {
+shapeRel = rel;
+}
+
+@Override
+public boolean isLeaf() {
+return isLeaf;
+}
+
+@Override
+public void setLeaf() {
+isLeaf = true;
+}
+
+@Override
+public BytesRef getTokenBytesWithLeaf(BytesRef result) {
+result = getTokenBytesNoLeaf(result);
+//max levels do not have leaf
+if (isLeaf() && !(getLevel() == tree.getMaxLevels())){
+//Add leaf byte
+result.bytes[result.offset + result.length] = LEAF;
+result.length++;
+}
+return result;
+}
+
+@Override
+public BytesRef getTokenBytesNoLeaf(BytesRef result) {
+if (result == null){
+result = new BytesRef();
+}
+getBytesRefFromS2CellId(cellId, result);
+return result;
+}
+
+@Override
+public int getLevel() {
+return this.level;
+}
+
+/**

[jira] [Commented] (SOLR-11770) NPE in tvrh if no field is specified and document doesn't contain any fields with term vectors

2018-01-10 Thread Erick Erickson (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-11770?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321719#comment-16321719
 ] 

Erick Erickson commented on SOLR-11770:
---

On another note:

If I switch my  field from stored="true", docValues="false" to 
stored="false", docValues="true" the output changes. It seems like having the 
ID field present in docValues should allow it to be returned.

With stored=false, docValues=true:
  "termVectors":[]}


With stored=true, docValues=false:
"termVectors":[
"GB18030TEST",[
  "uniqueKey","GB18030TEST"],
"SP2514N",[
  "uniqueKey","SP2514N"],
"6H500F0",[
  "uniqueKey","6H500F0"],
"F8V7067-APL-KIT",[
  "uniqueKey","F8V7067-APL-KIT"],
"IW-02",[
  "uniqueKey","IW-02"],
"MA147LL/A",[
  "uniqueKey","MA147LL/A",
  "includes",[
"cable",[],
"earbud",[],
"headphones",[],
"usb",[]]],
"adata",[
  "uniqueKey","adata"],
"apple",[
  "uniqueKey","apple"],
"asus",[
  "uniqueKey","asus"],
"ati",[
  "uniqueKey","ati"]]}



> NPE in tvrh if no field is specified and document doesn't contain any fields 
> with term vectors
> --
>
> Key: SOLR-11770
> URL: https://issues.apache.org/jira/browse/SOLR-11770
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>Affects Versions: 6.6.2
>Reporter: Nikolay Martynov
>Assignee: Erick Erickson
>
> It looks like if {{tvrh}} request doesn't contain {{fl}} parameter and 
> document doesn't have any fields with term vectors then Solr returns NPE.
> Request: 
> {{tvrh?shards.qt=/tvrh=field%3Avalue=json=id%3A123=true}}.
> On our 'old' schema we had some fields with {{termVectors}} and even more 
> fields with position data. In our new schema we tried to remove unused data 
> so we dropped a lot of position data and some term vectors.
> Our documents are 'sparsely' populated - not all documents contain all fields.
> Above request was returning fine for our 'old' schema and returns 500 for our 
> 'new' schema - on exactly same Solr (6.6.2).
> Stack trace:
> {code}
> 2017-12-18 01:15:00.958 ERROR (qtp255041198-46697) [c:test s:shard3 
> r:core_node11 x:test_shard3_replica1] o.a.s.h.RequestHandlerBase 
> java.lang.NullPointerException
>at 
> org.apache.solr.handler.component.TermVectorComponent.process(TermVectorComponent.java:324)
>at 
> org.apache.solr.handler.component.SearchHandler.handleRequestBody(SearchHandler.java:296)
>at 
> org.apache.solr.handler.RequestHandlerBase.handleRequest(RequestHandlerBase.java:173)
>at org.apache.solr.core.SolrCore.execute(SolrCore.java:2482)
>at org.apache.solr.servlet.HttpSolrCall.execute(HttpSolrCall.java:723)
>at org.apache.solr.servlet.HttpSolrCall.call(HttpSolrCall.java:529)
>at 
> org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:361)
>at 
> org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:305)
>at 
> org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1691)
>at 
> org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:582)
>at 
> org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:143)
>at 
> org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:548)
>at 
> org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:226)
>at 
> org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1180)
>at 
> org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:512)
>at 
> org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:185)
>at 
> org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1112)
>at 
> org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:141)
>at 
> org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:213)
>at 
> org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:119)
>at 
> org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:134)
>at 
> org.eclipse.jetty.rewrite.handler.RewriteHandler.handle(RewriteHandler.java:335)
>at 
> org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:134)
>at org.eclipse.jetty.server.Server.handle(Server.java:534)
>at org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:320)
>at 
> org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:251)
>at 
> 

[JENKINS] Lucene-Solr-7.2-Linux (64bit/jdk1.8.0_144) - Build # 121 - Still Unstable!

2018-01-10 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.2-Linux/121/
Java: 64bit/jdk1.8.0_144 -XX:-UseCompressedOops -XX:+UseConcMarkSweepGC

1 tests failed.
FAILED:  
org.apache.solr.handler.TestReplicationHandler.doTestIndexAndConfigReplication

Error Message:
Index: 0, Size: 0

Stack Trace:
java.lang.IndexOutOfBoundsException: Index: 0, Size: 0
at 
__randomizedtesting.SeedInfo.seed([6716DDEFF3E05EF3:735E86BAD0E7E3ED]:0)
at java.util.ArrayList.rangeCheck(ArrayList.java:653)
at java.util.ArrayList.get(ArrayList.java:429)
at 
org.apache.solr.handler.TestReplicationHandler.doTestIndexAndConfigReplication(TestReplicationHandler.java:561)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.lang.Thread.run(Thread.java:748)




Build Log:
[...truncated 11725 lines...]
   [junit4] Suite: org.apache.solr.handler.TestReplicationHandler
   [junit4]   2> 200686 INFO  
(SUITE-TestReplicationHandler-seed#[6716DDEFF3E05EF3]-worker) [] 
o.a.s.SolrTestCaseJ4 SecureRandom 

[jira] [Resolved] (LUCENE-2287) Unexpected terms are highlighted within nested SpanQuery instances

2018-01-10 Thread David Smiley (JIRA)

 [ 
https://issues.apache.org/jira/browse/LUCENE-2287?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

David Smiley resolved LUCENE-2287.
--
Resolution: Won't Fix

It's very debatable how to close this... I'm setting to "Won't Fix" since the 
extensive patch Michael Goddard supplied towards the original Highlighter was 
not applied to it.  LUCENE-8121 addresses the issue in the UnifiedHighlighter.  
I closed duplicated issues for the original Highlighter as "Duplicate".  I 
ported the test in LUCENE-5455 to LUCENE-8121 which I felt adequately revealed 
the problem that is fixed; no more is necessary IMO.

> Unexpected terms are highlighted within nested SpanQuery instances
> --
>
> Key: LUCENE-2287
> URL: https://issues.apache.org/jira/browse/LUCENE-2287
> Project: Lucene - Core
>  Issue Type: Improvement
>  Components: modules/highlighter
>Affects Versions: 2.9.1
> Environment: Linux, Solaris, Windows
>Reporter: Michael Goddard
>Assignee: David Smiley
>Priority: Minor
> Fix For: 7.3
>
> Attachments: LUCENE-2287.patch, LUCENE-2287.patch, LUCENE-2287.patch, 
> LUCENE-2287.patch, LUCENE-2287.patch, LUCENE-2287.patch
>
>   Original Estimate: 336h
>  Remaining Estimate: 336h
>
> I haven't yet been able to resolve why I'm seeing spurious highlighting in 
> nested SpanQuery instances.  Briefly, the issue is illustrated by the second 
> instance of "Lucene" being highlighted in the test below, when it doesn't 
> satisfy the inner span.  There's been some discussion about this on the 
> java-dev list, and I'm opening this issue now because I have made some 
> initial progress on this.
> This new test, added to the  HighlighterTest class in lucene_2_9_1, 
> illustrates this:
> /*
>  * Ref: http://www.lucidimagination.com/blog/2009/07/18/the-spanquery/
>  */
> public void testHighlightingNestedSpans2() throws Exception {
>   String theText = "The Lucene was made by Doug Cutting and Lucene great 
> Hadoop was"; // Problem
>   //String theText = "The Lucene was made by Doug Cutting and the great 
> Hadoop was"; // Works okay
>   String fieldName = "SOME_FIELD_NAME";
>   SpanNearQuery spanNear = new SpanNearQuery(new SpanQuery[] {
> new SpanTermQuery(new Term(fieldName, "lucene")),
> new SpanTermQuery(new Term(fieldName, "doug")) }, 5, true);
>   Query query = new SpanNearQuery(new SpanQuery[] { spanNear,
> new SpanTermQuery(new Term(fieldName, "hadoop")) }, 4, true);
>   String expected = "The Lucene was made by Doug Cutting and 
> Lucene great Hadoop was";
>   //String expected = "The Lucene was made by Doug Cutting and 
> the great Hadoop was";
>   String observed = highlightField(query, fieldName, theText);
>   System.out.println("Expected: \"" + expected + "\n" + "Observed: \"" + 
> observed);
>   assertEquals("Why is that second instance of the term \"Lucene\" 
> highlighted?", expected, observed);
> }
> Is this an issue that's arisen before?  I've been reading through the source 
> to QueryScorer, WeightedSpanTerm, WeightedSpanTermExtractor, Spans, and 
> NearSpansOrdered, but haven't found the solution yet.  Initially, I thought 
> that the extractWeightedSpanTerms method in WeightedSpanTermExtractor should 
> be called on each clause of a SpanNearQuery or SpanOrQuery, but that didn't 
> get me too far.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Resolved] (LUCENE-8121) UnifiedHighlighter can highlight terms within SpanNear clauses at unmatched positions

2018-01-10 Thread David Smiley (JIRA)

 [ 
https://issues.apache.org/jira/browse/LUCENE-8121?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

David Smiley resolved LUCENE-8121.
--
Resolution: Fixed

> UnifiedHighlighter can highlight terms within SpanNear clauses at unmatched 
> positions
> -
>
> Key: LUCENE-8121
> URL: https://issues.apache.org/jira/browse/LUCENE-8121
> Project: Lucene - Core
>  Issue Type: Bug
>  Components: modules/highlighter
>Reporter: David Smiley
>Assignee: David Smiley
>Priority: Minor
> Fix For: 7.3
>
> Attachments: LUCENE-2287_UH_SpanCollector.patch, 
> LUCENE-2287_UH_SpanCollector.patch
>
>
> The UnifiedHighlighter (and original Highlighter) highlight phrases by 
> converting to a SpanQuery and using the Spans start and end positions to 
> assume that every occurrence of the underlying terms between those positions 
> are to be highlighted.  But this is inaccurate; see LUCENE-5455 for a good 
> example, and also LUCENE-2287.  The solution is to use the SpanCollector API 
> which was introduced after the phrase matching aspects of those highlighters 
> were developed. 



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (LUCENE-8121) UnifiedHighlighter can highlight terms within SpanNear clauses at unmatched positions

2018-01-10 Thread ASF subversion and git services (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-8121?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321680#comment-16321680
 ] 

ASF subversion and git services commented on LUCENE-8121:
-

Commit 57e571559495e1aba4f8f345b06bcdbbcf5bd1db in lucene-solr's branch 
refs/heads/branch_7x from [~dsmiley]
[ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=57e5715 ]

LUCENE-8121: UH switch to SpanCollector API. Better accuracy.
* Use the filtered freq in position sensitive terms (better scores)
* Refactored UH's OffsetsEnum
* Improved test randomization in TestUnifiedHighlighter & MTQ

(cherry picked from commit 352ec01a6ef68bc81fdb84a7f72e81a6698f594c)

# Conflicts:
#   
lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PhraseHelper.java
#   
lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterStrictPhrases.java


> UnifiedHighlighter can highlight terms within SpanNear clauses at unmatched 
> positions
> -
>
> Key: LUCENE-8121
> URL: https://issues.apache.org/jira/browse/LUCENE-8121
> Project: Lucene - Core
>  Issue Type: Bug
>  Components: modules/highlighter
>Reporter: David Smiley
>Assignee: David Smiley
>Priority: Minor
> Fix For: 7.3
>
> Attachments: LUCENE-2287_UH_SpanCollector.patch, 
> LUCENE-2287_UH_SpanCollector.patch
>
>
> The UnifiedHighlighter (and original Highlighter) highlight phrases by 
> converting to a SpanQuery and using the Spans start and end positions to 
> assume that every occurrence of the underlying terms between those positions 
> are to be highlighted.  But this is inaccurate; see LUCENE-5455 for a good 
> example, and also LUCENE-2287.  The solution is to use the SpanCollector API 
> which was introduced after the phrase matching aspects of those highlighters 
> were developed. 



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS] Lucene-Solr-7.x-Windows (32bit/jdk1.8.0_144) - Build # 394 - Still Unstable!

2018-01-10 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Windows/394/
Java: 32bit/jdk1.8.0_144 -client -XX:+UseSerialGC

3 tests failed.
FAILED:  
junit.framework.TestSuite.org.apache.solr.client.solrj.embedded.TestEmbeddedSolrServerSchemaAPI

Error Message:
Could not remove the following files (in the order of attempts):
C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\solr\build\solr-core\test\J1\temp\solr.client.solrj.embedded.TestEmbeddedSolrServerSchemaAPI_27D349E3A266B008-001\init-core-data-001:
 java.nio.file.AccessDeniedException: 
C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\solr\build\solr-core\test\J1\temp\solr.client.solrj.embedded.TestEmbeddedSolrServerSchemaAPI_27D349E3A266B008-001\init-core-data-001

C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\solr\build\solr-core\test\J1\temp\solr.client.solrj.embedded.TestEmbeddedSolrServerSchemaAPI_27D349E3A266B008-001:
 java.nio.file.DirectoryNotEmptyException: 
C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\solr\build\solr-core\test\J1\temp\solr.client.solrj.embedded.TestEmbeddedSolrServerSchemaAPI_27D349E3A266B008-001
 

Stack Trace:
java.io.IOException: Could not remove the following files (in the order of 
attempts):
   
C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\solr\build\solr-core\test\J1\temp\solr.client.solrj.embedded.TestEmbeddedSolrServerSchemaAPI_27D349E3A266B008-001\init-core-data-001:
 java.nio.file.AccessDeniedException: 
C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\solr\build\solr-core\test\J1\temp\solr.client.solrj.embedded.TestEmbeddedSolrServerSchemaAPI_27D349E3A266B008-001\init-core-data-001
   
C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\solr\build\solr-core\test\J1\temp\solr.client.solrj.embedded.TestEmbeddedSolrServerSchemaAPI_27D349E3A266B008-001:
 java.nio.file.DirectoryNotEmptyException: 
C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\solr\build\solr-core\test\J1\temp\solr.client.solrj.embedded.TestEmbeddedSolrServerSchemaAPI_27D349E3A266B008-001

at __randomizedtesting.SeedInfo.seed([27D349E3A266B008]:0)
at org.apache.lucene.util.IOUtils.rm(IOUtils.java:329)
at 
org.apache.lucene.util.TestRuleTemporaryFilesCleanup.afterAlways(TestRuleTemporaryFilesCleanup.java:216)
at 
com.carrotsearch.randomizedtesting.rules.TestRuleAdapter$1.afterAlways(TestRuleAdapter.java:31)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:43)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.lang.Thread.run(Thread.java:748)


FAILED:  
junit.framework.TestSuite.org.apache.solr.search.TestCollapseQParserPlugin

Error Message:
Could not remove the following files (in the order of attempts):
C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\solr\build\solr-core\test\J1\temp\solr.search.TestCollapseQParserPlugin_27D349E3A266B008-001\init-core-data-001\tlog:
 java.nio.file.DirectoryNotEmptyException: 
C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\solr\build\solr-core\test\J1\temp\solr.search.TestCollapseQParserPlugin_27D349E3A266B008-001\init-core-data-001\tlog

C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\solr\build\solr-core\test\J1\temp\solr.search.TestCollapseQParserPlugin_27D349E3A266B008-001\init-core-data-001:
 java.nio.file.DirectoryNotEmptyException: 
C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\solr\build\solr-core\test\J1\temp\solr.search.TestCollapseQParserPlugin_27D349E3A266B008-001\init-core-data-001

C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\solr\build\solr-core\test\J1\temp\solr.search.TestCollapseQParserPlugin_27D349E3A266B008-001\init-core-data-001\tlog\tlog.069:
 java.nio.file.AccessDeniedException: 
C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\solr\build\solr-core\test\J1\temp\solr.search.TestCollapseQParserPlugin_27D349E3A266B008-001\init-core-data-001\tlog\tlog.069

C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\solr\build\solr-core\test\J1\temp\solr.search.TestCollapseQParserPlugin_27D349E3A266B008-001:
 java.nio.file.DirectoryNotEmptyException: 
C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\solr\build\solr-core\test\J1\temp\solr.search.TestCollapseQParserPlugin_27D349E3A266B008-001
 

Stack Trace:

[jira] [Updated] (SOLR-11795) Add Solr metrics exporter for Prometheus

2018-01-10 Thread Minoru Osuka (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-11795?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Minoru Osuka updated SOLR-11795:

Attachment: SOLR-11795-2.patch

Attach new patch file.

> Add Solr metrics exporter for Prometheus
> 
>
> Key: SOLR-11795
> URL: https://issues.apache.org/jira/browse/SOLR-11795
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: metrics
>Affects Versions: 7.2
>Reporter: Minoru Osuka
>Assignee: Koji Sekiguchi
>Priority: Minor
> Attachments: SOLR-11795-2.patch, SOLR-11795.patch, 
> solr-dashboard.png, solr-exporter-diagram.png
>
>
> I'd like to monitor Solr using Prometheus and Grafana.
> I've already created Solr metrics exporter for Prometheus. I'd like to 
> contribute to contrib directory if you don't mind.
> !solr-exporter-diagram.png|thumbnail!
> !solr-dashboard.png|thumbnail!



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Resolved] (LUCENE-5455) Nested SpanNear queries lose positional highlights

2018-01-10 Thread David Smiley (JIRA)

 [ 
https://issues.apache.org/jira/browse/LUCENE-5455?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

David Smiley resolved LUCENE-5455.
--
   Resolution: Duplicate
Fix Version/s: (was: 6.0)
   (was: 4.9)
   7.3

> Nested SpanNear queries lose positional highlights
> --
>
> Key: LUCENE-5455
> URL: https://issues.apache.org/jira/browse/LUCENE-5455
> Project: Lucene - Core
>  Issue Type: Bug
>  Components: modules/highlighter
>Affects Versions: 4.3.1, 4.6.1
>Reporter: Steve Davids
> Fix For: 7.3
>
> Attachments: LUCENE-5455-Tests.patch
>
>
> Given text of: "x y z x z x a"
> With a query of: spanNear([spanNear([text:x, text:y, text:z], 0, true), 
> text:a], 10, false)
> Resulting highlight: x y z x z x 
> a
> Expected highlight: x y z x z x a
> This is caused because WeightedSpanTermExtractor.extractWeightedSpanTerms 
> takes the SpanQuery and flattens all terms and uses the positions from the 
> outermost SpanNear clause (ignoring the nested SpanNear positions). I believe 
> this could be resolved with a little recursion - walking the span query tree 
> in the extractWeightedSpanTerms method.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (LUCENE-8121) UnifiedHighlighter can highlight terms within SpanNear clauses at unmatched positions

2018-01-10 Thread ASF subversion and git services (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-8121?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321658#comment-16321658
 ] 

ASF subversion and git services commented on LUCENE-8121:
-

Commit 352ec01a6ef68bc81fdb84a7f72e81a6698f594c in lucene-solr's branch 
refs/heads/master from [~dsmiley]
[ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=352ec01 ]

LUCENE-8121: UH switch to SpanCollector API. Better accuracy.
* Use the filtered freq in position sensitive terms (better scores)
* Refactored UH's OffsetsEnum
* Improved test randomization in TestUnifiedHighlighter & MTQ


> UnifiedHighlighter can highlight terms within SpanNear clauses at unmatched 
> positions
> -
>
> Key: LUCENE-8121
> URL: https://issues.apache.org/jira/browse/LUCENE-8121
> Project: Lucene - Core
>  Issue Type: Bug
>  Components: modules/highlighter
>Reporter: David Smiley
>Assignee: David Smiley
>Priority: Minor
> Fix For: 7.3
>
> Attachments: LUCENE-2287_UH_SpanCollector.patch, 
> LUCENE-2287_UH_SpanCollector.patch
>
>
> The UnifiedHighlighter (and original Highlighter) highlight phrases by 
> converting to a SpanQuery and using the Spans start and end positions to 
> assume that every occurrence of the underlying terms between those positions 
> are to be highlighted.  But this is inaccurate; see LUCENE-5455 for a good 
> example, and also LUCENE-2287.  The solution is to use the SpanCollector API 
> which was introduced after the phrase matching aspects of those highlighters 
> were developed. 



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS-EA] Lucene-Solr-master-Linux (64bit/jdk-10-ea+37) - Build # 21256 - Failure!

2018-01-10 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/21256/
Java: 64bit/jdk-10-ea+37 -XX:-UseCompressedOops -XX:+UseG1GC

All tests passed

Build Log:
[...truncated 3376 lines...]
   [junit4] JVM J0: stdout was not empty, see: 
/home/jenkins/workspace/Lucene-Solr-master-Linux/lucene/build/analysis/icu/test/temp/junit4-J0-20180111_034338_22517043396416989762877.sysout
   [junit4] >>> JVM J0 emitted unexpected output (verbatim) 
   [junit4] #
   [junit4] # A fatal error has been detected by the Java Runtime Environment:
   [junit4] #
   [junit4] #  SIGSEGV (0xb) at pc=0x7f99d5a82765, pid=7097, tid=7196
   [junit4] #
   [junit4] # JRE version: Java(TM) SE Runtime Environment (10.0+37) (build 
10-ea+37)
   [junit4] # Java VM: Java HotSpot(TM) 64-Bit Server VM (10-ea+37, mixed mode, 
tiered, g1 gc, linux-amd64)
   [junit4] # Problematic frame:
   [junit4] # V  [libjvm.so+0xbd0765]  Node::add_req(Node*)+0xb5
   [junit4] #
   [junit4] # No core dump will be written. Core dumps have been disabled. To 
enable core dumping, try "ulimit -c unlimited" before starting Java again
   [junit4] #
   [junit4] # An error report file with more information is saved as:
   [junit4] # 
/home/jenkins/workspace/Lucene-Solr-master-Linux/lucene/build/analysis/icu/test/J0/hs_err_pid7097.log
   [junit4] #
   [junit4] # Compiler replay data is saved as:
   [junit4] # 
/home/jenkins/workspace/Lucene-Solr-master-Linux/lucene/build/analysis/icu/test/J0/replay_pid7097.log
   [junit4] #
   [junit4] # If you would like to submit a bug report, please visit:
   [junit4] #   http://bugreport.java.com/bugreport/crash.jsp
   [junit4] #
   [junit4] <<< JVM J0: EOF 

[...truncated 3 lines...]
   [junit4] ERROR: JVM J0 ended with an exception, command line: 
/home/jenkins/tools/java/64bit/jdk-10-ea+37/bin/java -XX:-UseCompressedOops 
-XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError 
-XX:HeapDumpPath=/home/jenkins/workspace/Lucene-Solr-master-Linux/heapdumps -ea 
-esa --illegal-access=deny -Dtests.prefix=tests -Dtests.seed=45C474CFBB846361 
-Xmx512M -Dtests.iters= -Dtests.verbose=false -Dtests.infostream=false 
-Dtests.codec=random -Dtests.postingsformat=random 
-Dtests.docvaluesformat=random -Dtests.locale=random -Dtests.timezone=random 
-Dtests.directory=random -Dtests.linedocsfile=europarl.lines.txt.gz 
-Dtests.luceneMatchVersion=8.0.0 -Dtests.cleanthreads=perMethod 
-Djava.util.logging.config.file=/home/jenkins/workspace/Lucene-Solr-master-Linux/lucene/tools/junit4/logging.properties
 -Dtests.nightly=false -Dtests.weekly=false -Dtests.monster=false 
-Dtests.slow=true -Dtests.asserts=true -Dtests.multiplier=3 -DtempDir=./temp 
-Djava.io.tmpdir=./temp 
-Djunit4.tempDir=/home/jenkins/workspace/Lucene-Solr-master-Linux/lucene/build/analysis/icu/test/temp
 -Dcommon.dir=/home/jenkins/workspace/Lucene-Solr-master-Linux/lucene 
-Dclover.db.dir=/home/jenkins/workspace/Lucene-Solr-master-Linux/lucene/build/clover/db
 
-Djava.security.policy=/home/jenkins/workspace/Lucene-Solr-master-Linux/lucene/tools/junit4/tests.policy
 -Dtests.LUCENE_VERSION=8.0.0 -Djetty.testMode=1 -Djetty.insecurerandom=1 
-Dsolr.directoryFactory=org.apache.solr.core.MockDirectoryFactory 
-Djava.awt.headless=true -Djdk.map.althashing.threshold=0 
-Dtests.src.home=/home/jenkins/workspace/Lucene-Solr-master-Linux 
-Djava.security.egd=file:/dev/./urandom 
-Djunit4.childvm.cwd=/home/jenkins/workspace/Lucene-Solr-master-Linux/lucene/build/analysis/icu/test/J0
 -Djunit4.childvm.id=0 -Djunit4.childvm.count=3 -Dfile.encoding=UTF-8 
-Djava.security.manager=org.apache.lucene.util.TestSecurityManager 
-Dtests.filterstacks=true -Dtests.leaveTemporary=false -classpath 

[jira] [Assigned] (SOLR-11795) Add Solr metrics exporter for Prometheus

2018-01-10 Thread Koji Sekiguchi (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-11795?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Koji Sekiguchi reassigned SOLR-11795:
-

Assignee: Koji Sekiguchi

> Add Solr metrics exporter for Prometheus
> 
>
> Key: SOLR-11795
> URL: https://issues.apache.org/jira/browse/SOLR-11795
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: metrics
>Affects Versions: 7.2
>Reporter: Minoru Osuka
>Assignee: Koji Sekiguchi
>Priority: Minor
> Attachments: SOLR-11795.patch, solr-dashboard.png, 
> solr-exporter-diagram.png
>
>
> I'd like to monitor Solr using Prometheus and Grafana.
> I've already created Solr metrics exporter for Prometheus. I'd like to 
> contribute to contrib directory if you don't mind.
> !solr-exporter-diagram.png|thumbnail!
> !solr-dashboard.png|thumbnail!



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (LUCENE-8121) UnifiedHighlighter can highlight terms within SpanNear clauses at unmatched positions

2018-01-10 Thread David Smiley (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-8121?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321637#comment-16321637
 ] 

David Smiley commented on LUCENE-8121:
--

I benchmarked it using benchmark/conf/highlighters-postings.alg with 
file.query.maker.file=conf/query-phrases.txt and highlighter=UH_PV  (offsets in 
postings with term vectors) and there is only a slight difference that may be 
in the noise.  Seemed same or slightly faster, and slightly less memory.  
That's a wikipedia data set.  

CHANGES.txt:
Improvement:
{noformat}
* LUCENE-8121: UnifiedHighlighter passage relevancy is improved for terms that 
are
  position sensitive (e.g. part of a phrase) by having an accurate freq. (David 
Smiley)
{noformat}
Bug Fixes:
{noformat}
* LUCENE-8121: The UnifiedHighlighter would highlight some terms within some 
nested
  SpanNearQueries at positions where it should not have.  It's fixed in this 
highlighter
  by switching to the SpanCollector API.  The original Highlighter still has 
this
  problem (LUCENE-2287, LUCENE-5455, LUCENE-6796).  Some public but internal 
parts of
  the UH were refactored. (David Smiley, Steve Davids)
{noformat}


> UnifiedHighlighter can highlight terms within SpanNear clauses at unmatched 
> positions
> -
>
> Key: LUCENE-8121
> URL: https://issues.apache.org/jira/browse/LUCENE-8121
> Project: Lucene - Core
>  Issue Type: Bug
>  Components: modules/highlighter
>Reporter: David Smiley
>Assignee: David Smiley
>Priority: Minor
> Fix For: 7.3
>
> Attachments: LUCENE-2287_UH_SpanCollector.patch, 
> LUCENE-2287_UH_SpanCollector.patch
>
>
> The UnifiedHighlighter (and original Highlighter) highlight phrases by 
> converting to a SpanQuery and using the Spans start and end positions to 
> assume that every occurrence of the underlying terms between those positions 
> are to be highlighted.  But this is inaccurate; see LUCENE-5455 for a good 
> example, and also LUCENE-2287.  The solution is to use the SpanCollector API 
> which was introduced after the phrase matching aspects of those highlighters 
> were developed. 



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Resolved] (LUCENE-6796) Some terms incorrectly highlighted in complex SpanQuery

2018-01-10 Thread David Smiley (JIRA)

 [ 
https://issues.apache.org/jira/browse/LUCENE-6796?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

David Smiley resolved LUCENE-6796.
--
   Resolution: Duplicate
Fix Version/s: 7.3

> Some terms incorrectly highlighted in complex SpanQuery
> ---
>
> Key: LUCENE-6796
> URL: https://issues.apache.org/jira/browse/LUCENE-6796
> Project: Lucene - Core
>  Issue Type: Bug
>  Components: modules/highlighter
>Affects Versions: 5.3
>Reporter: Tim Allison
>Assignee: David Smiley
>Priority: Trivial
> Fix For: 7.3
>
> Attachments: LUCENE-6796-testcase.patch
>
>
> [~modassar] initially raised this on LUCENE-5205.  I'm opening this as a 
> separate issue.
> If a SpanNear is within a SpanOr, it looks like the child terms within the 
> SpanNear query are getting highlighted even if there is no match on that 
> SpanNear query...in some special cases.  Specifically, in the format of the 
> parser in LUCENE-5205 {{"(b [c z]) d\"~2"}}, which is equivalent to: find "b" 
> or the phrase "c z" within two words of "d" either direction
> This affects trunk. 



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-11611) Starting Solr using solr.cmd fails in Windows, when the path contains a parenthesis

2018-01-10 Thread Mikhail Khludnev (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-11611?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321628#comment-16321628
 ] 

Mikhail Khludnev commented on SOLR-11611:
-

Life is too short, please submit a patch. 

> Starting Solr using solr.cmd fails in Windows, when the path contains a 
> parenthesis
> ---
>
> Key: SOLR-11611
> URL: https://issues.apache.org/jira/browse/SOLR-11611
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: SolrCLI
>Affects Versions: 7.1
> Environment: Microsoft Windows [Version 10.0.15063]
> java version "1.8.0_144"
> Java(TM) SE Runtime Environment (build 1.8.0_144-b01)
> Java HotSpot(TM) 64-Bit Server VM (build 25.144-b01, mixed mode)
>Reporter: Jakob Furrer
> Fix For: master (8.0), 7.3
>
>   Original Estimate: 1h
>  Remaining Estimate: 1h
>
> Starting Solr using solr.cmd fails in Windows, when the path contains a parenthesis.
> Use the following example to reproduce the error:
> {quote}C:\>c:
> C:\>cd "C:\Program Files (x86)\Company Name\ProductName Solr\bin"
> C:\Program Files (x86)\Company Name\ProductName Solr\bin>dir
>  Volume in Laufwerk C: hat keine Bezeichnung.
>  Volumeseriennummer: 8207-3B8B
>  Verzeichnis von C:\Program Files (x86)\Company Name\ProductName Solr\bin
> 06.11.2017  15:52  .
> 06.11.2017  15:52  ..
> 06.11.2017  15:39  init.d
> 03.11.2017  17:32 8 209 post
> 03.11.2017  17:3275 963 solr
> 06.11.2017  14:2469 407 solr.cmd
>3 Datei(en),153 579 Bytes
>3 Verzeichnis(se), 51 191 619 584 Bytes frei
> C:\Program Files (x86)\Company Name\ProductName Solr\bin>solr.cmd start
> *"\Company" kann syntaktisch an dieser Stelle nicht verarbeitet werden.*
> C:\Program Files (x86)\Company Name\ProductName Solr\bin>{quote}



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS] Lucene-Solr-7.2-Windows (32bit/jdk1.8.0_144) - Build # 34 - Still Unstable!

2018-01-10 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.2-Windows/34/
Java: 32bit/jdk1.8.0_144 -server -XX:+UseParallelGC

9 tests failed.
FAILED:  
junit.framework.TestSuite.org.apache.lucene.benchmark.byTask.tasks.WriteEnwikiLineDocTaskTest

Error Message:
Could not remove the following files (in the order of attempts):
C:\Users\jenkins\workspace\Lucene-Solr-7.2-Windows\lucene\build\benchmark\test\J0\temp\lucene.benchmark.byTask.tasks.WriteEnwikiLineDocTaskTest_1218B8275068E955-001\benchmark-001:
 java.nio.file.DirectoryNotEmptyException: 
C:\Users\jenkins\workspace\Lucene-Solr-7.2-Windows\lucene\build\benchmark\test\J0\temp\lucene.benchmark.byTask.tasks.WriteEnwikiLineDocTaskTest_1218B8275068E955-001\benchmark-001

C:\Users\jenkins\workspace\Lucene-Solr-7.2-Windows\lucene\build\benchmark\test\J0\temp\lucene.benchmark.byTask.tasks.WriteEnwikiLineDocTaskTest_1218B8275068E955-001\benchmark-001\two-lines-each.txt:
 java.nio.file.NoSuchFileException: 
C:\Users\jenkins\workspace\Lucene-Solr-7.2-Windows\lucene\build\benchmark\test\J0\temp\lucene.benchmark.byTask.tasks.WriteEnwikiLineDocTaskTest_1218B8275068E955-001\benchmark-001\two-lines-each.txt
 

Stack Trace:
java.io.IOException: Could not remove the following files (in the order of 
attempts):
   
C:\Users\jenkins\workspace\Lucene-Solr-7.2-Windows\lucene\build\benchmark\test\J0\temp\lucene.benchmark.byTask.tasks.WriteEnwikiLineDocTaskTest_1218B8275068E955-001\benchmark-001:
 java.nio.file.DirectoryNotEmptyException: 
C:\Users\jenkins\workspace\Lucene-Solr-7.2-Windows\lucene\build\benchmark\test\J0\temp\lucene.benchmark.byTask.tasks.WriteEnwikiLineDocTaskTest_1218B8275068E955-001\benchmark-001
   
C:\Users\jenkins\workspace\Lucene-Solr-7.2-Windows\lucene\build\benchmark\test\J0\temp\lucene.benchmark.byTask.tasks.WriteEnwikiLineDocTaskTest_1218B8275068E955-001\benchmark-001\two-lines-each.txt:
 java.nio.file.NoSuchFileException: 
C:\Users\jenkins\workspace\Lucene-Solr-7.2-Windows\lucene\build\benchmark\test\J0\temp\lucene.benchmark.byTask.tasks.WriteEnwikiLineDocTaskTest_1218B8275068E955-001\benchmark-001\two-lines-each.txt

at __randomizedtesting.SeedInfo.seed([1218B8275068E955]:0)
at org.apache.lucene.util.IOUtils.rm(IOUtils.java:329)
at 
org.apache.lucene.util.TestRuleTemporaryFilesCleanup.afterAlways(TestRuleTemporaryFilesCleanup.java:216)
at 
com.carrotsearch.randomizedtesting.rules.TestRuleAdapter$1.afterAlways(TestRuleAdapter.java:31)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:43)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.lang.Thread.run(Thread.java:748)


FAILED:  
junit.framework.TestSuite.org.apache.lucene.codecs.simpletext.TestSimpleTextCompoundFormat

Error Message:
Could not remove the following files (in the order of attempts):
C:\Users\jenkins\workspace\Lucene-Solr-7.2-Windows\lucene\build\codecs\test\J0\temp\lucene.codecs.simpletext.TestSimpleTextCompoundFormat_A161CCE50B7EED41-001\CFSManySubFiles-001:
 java.nio.file.DirectoryNotEmptyException: 
C:\Users\jenkins\workspace\Lucene-Solr-7.2-Windows\lucene\build\codecs\test\J0\temp\lucene.codecs.simpletext.TestSimpleTextCompoundFormat_A161CCE50B7EED41-001\CFSManySubFiles-001

C:\Users\jenkins\workspace\Lucene-Solr-7.2-Windows\lucene\build\codecs\test\J0\temp\lucene.codecs.simpletext.TestSimpleTextCompoundFormat_A161CCE50B7EED41-001\CFSManySubFiles-001\_123.191:
 java.nio.file.AccessDeniedException: 
C:\Users\jenkins\workspace\Lucene-Solr-7.2-Windows\lucene\build\codecs\test\J0\temp\lucene.codecs.simpletext.TestSimpleTextCompoundFormat_A161CCE50B7EED41-001\CFSManySubFiles-001\_123.191

C:\Users\jenkins\workspace\Lucene-Solr-7.2-Windows\lucene\build\codecs\test\J0\temp\lucene.codecs.simpletext.TestSimpleTextCompoundFormat_A161CCE50B7EED41-001:
 java.nio.file.DirectoryNotEmptyException: 
C:\Users\jenkins\workspace\Lucene-Solr-7.2-Windows\lucene\build\codecs\test\J0\temp\lucene.codecs.simpletext.TestSimpleTextCompoundFormat_A161CCE50B7EED41-001
 

Stack Trace:
java.io.IOException: Could not remove the following files (in the order of 
attempts):
   

[jira] [Commented] (SOLR-11838) explore supporting Deeplearning4j NeuralNetwork models in contrib/ltr

2018-01-10 Thread Adam Gibson (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-11838?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321580#comment-16321580
 ] 

Adam Gibson commented on SOLR-11838:


Hi Gus,

There are 2 sources of off heap memory to consider. 
We cover that here:
http://deeplearning4j.org/memory
 For the GPU we have essentially our own GC. The thing to think about here, if 
you want to support either CPU or GPU, is that both are optional.
I'm more than glad to answer questions on that if anyone has any concerns.

 Beyond that, Nd4j itself is very similar to slf4j. You pick the "chip" you 
want as a jar file.
So you could in theory have 2 class paths, one for cpu and one for gpu, picking 
one as the default.

Pretrain models can be either a computation graph or a multi layer network. We 
have a ModelGuesser that helps mitigate the various types.

We are introducing another type as well soonish that can directly import 
tensorflow and onnx as well (this will be a more flexible api similar to 
pytorch)
which will also work. We will be releasing that within the next few weeks. 
Depending on the timelines for the release, we're happy to coordinate with 
folks interested
in various pretrained models.

This is on top of our existing Keras support.

For the untrained network/various hyper parameters, might I suggest allowing 
folks to upload a config of their choice? You can try to offer various kinds of 
sample architectures but we've found that the best way to handle this in 
practice is by just allowing folks to upload their own architectures.

For the datasetiterator: That is mainly used for minibatch training. You can 
also create datasets on the fly as well.

For inference purposes, DL4J makes no assumptions. You could technically just 
call network.output on an INDArray directly.

The solr project might be also interested in our alpha sparse support if you 
need to convert a document vector directly for inference purposes.


> explore supporting Deeplearning4j NeuralNetwork models in contrib/ltr
> -
>
> Key: SOLR-11838
> URL: https://issues.apache.org/jira/browse/SOLR-11838
> Project: Solr
>  Issue Type: New Feature
>  Components: contrib - LTR
>Reporter: Christine Poerschke
> Attachments: SOLR-11838.patch
>
>
> [~yuyano] wrote in SOLR-11597:
> bq. ... If we think to apply this to more complex neural networks in the 
> future, we will need to support layers ...
> [~malcorn_redhat] wrote in SOLR-11597:
> bq. ... In my opinion, if this is a route Solr eventually wants to go, I 
> think a better strategy would be to just add a dependency on 
> [Deeplearning4j|https://deeplearning4j.org/] ...
> Creating this ticket for the idea to be explored further (if anyone is 
> interested in exploring it), complementary to and independent of the 
> SOLR-11597 RankNet related effort.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-11838) explore supporting Deeplearning4j NeuralNetwork models in contrib/ltr

2018-01-10 Thread Gus Heck (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-11838?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321567#comment-16321567
 ] 

Gus Heck commented on SOLR-11838:
-

One interesting thing to think about is that ND4J and thus DL4J makes extensive 
use of off heap memory, and requires native drivers for GPU utilization. This 
will probably mean that folks might want to add GPU nodes to their existing 
cluster and define a collection that resides on only those nodes, then the ML 
expressions can target collections that  have GPU enabled nodes only... drawing 
data from the existing regular nodes... (I *think* that should be possible 
anyway :) ).

Other wide ranging thoughts I've had... [~cpoerschke], let me know if I'm way 
too far afield, I can go get my own jira(s)... or this could become a parent of 
several jiras...

* Pre-trained models loaded into blob store, with a name that can be used to 
retrieve them and hydrate them with  
ModelSerializer.restoreMultiLayerNetwork(InputStream) where it can serve as the 
guts of a generic predict() expression that can act as a tuple transformer 
(i.e. categorizing each tuple, etc)  - including loading keras models/transfer 
learning etc (something DL4J should be able to do for us).
* an expression that accepts the hyperparameters/dimensions of a layer that can 
be combined with other such expressions to create an untrained network.
* expressions for partitioning the data into test/train (or K-Folds) and 
iterating the training a model (I believe I've seen jiras go by that sound like 
something of the sort exists in streaming expressions already, I know 
[~joel.bernstein] talked about efficient sampling in his LSR talk)
* writing the trained model back to the blob store automatically on each epoch 
on a rolling basis (keeping last N copies) to enable early stopping, or 
selection of best model after K-folds.
* a solrj implementation of DataSetIterator that can use a query to specify a 
set of data to be used for training which then is streamed down, and 
potentially cached locally or re-streamed for training iteration.

In all cases the intent would be that Solr provides Data, environment and 
infrastructure and all ML heavy lifting would be DL4J based, and I would hope 
reusable in a LTR context at the very least by drawing trained models from 
blobstore. I notice LTR has a model store; is that backed by the blob store, 
or are there 2 places to store content now? (sorry haven't had any real 
opportunity to use LTR yet)

Anyway, that stuff's all been rattling around my head trying to get out, hope 
it's not too much for this ticket.

> explore supporting Deeplearning4j NeuralNetwork models in contrib/ltr
> -
>
> Key: SOLR-11838
> URL: https://issues.apache.org/jira/browse/SOLR-11838
> Project: Solr
>  Issue Type: New Feature
>  Components: contrib - LTR
>Reporter: Christine Poerschke
> Attachments: SOLR-11838.patch
>
>
> [~yuyano] wrote in SOLR-11597:
> bq. ... If we think to apply this to more complex neural networks in the 
> future, we will need to support layers ...
> [~malcorn_redhat] wrote in SOLR-11597:
> bq. ... In my opinion, if this is a route Solr eventually wants to go, I 
> think a better strategy would be to just add a dependency on 
> [Deeplearning4j|https://deeplearning4j.org/] ...
> Creating this ticket for the idea to be explored further (if anyone is 
> interested in exploring it), complementary to and independent of the 
> SOLR-11597 RankNet related effort.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS] Lucene-Solr-7.2-Linux (32bit/jdk1.8.0_144) - Build # 120 - Unstable!

2018-01-10 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.2-Linux/120/
Java: 32bit/jdk1.8.0_144 -client -XX:+UseSerialGC

3 tests failed.
FAILED:  junit.framework.TestSuite.org.apache.solr.core.TestLazyCores

Error Message:
1 thread leaked from SUITE scope at org.apache.solr.core.TestLazyCores: 1) 
Thread[id=914, name=searcherExecutor-397-thread-1, state=WAITING, 
group=TGRP-TestLazyCores] at sun.misc.Unsafe.park(Native Method)
 at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175) 
at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
 at 
java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442) 
at 
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)   
  at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134) 
at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) 
at java.lang.Thread.run(Thread.java:748)

Stack Trace:
com.carrotsearch.randomizedtesting.ThreadLeakError: 1 thread leaked from SUITE 
scope at org.apache.solr.core.TestLazyCores: 
   1) Thread[id=914, name=searcherExecutor-397-thread-1, state=WAITING, 
group=TGRP-TestLazyCores]
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
at 
java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
at 
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
at __randomizedtesting.SeedInfo.seed([969D140F0D87DDCD]:0)


FAILED:  junit.framework.TestSuite.org.apache.solr.core.TestLazyCores

Error Message:
There are still zombie threads that couldn't be terminated:1) 
Thread[id=914, name=searcherExecutor-397-thread-1, state=WAITING, 
group=TGRP-TestLazyCores] at sun.misc.Unsafe.park(Native Method)
 at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175) 
at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
 at 
java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442) 
at 
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)   
  at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134) 
at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) 
at java.lang.Thread.run(Thread.java:748)

Stack Trace:
com.carrotsearch.randomizedtesting.ThreadLeakError: There are still zombie 
threads that couldn't be terminated:
   1) Thread[id=914, name=searcherExecutor-397-thread-1, state=WAITING, 
group=TGRP-TestLazyCores]
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
at 
java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
at 
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
at __randomizedtesting.SeedInfo.seed([969D140F0D87DDCD]:0)


FAILED:  org.apache.solr.core.TestLazyCores.testNoCommit

Error Message:
Exception during query

Stack Trace:
java.lang.RuntimeException: Exception during query
at 
__randomizedtesting.SeedInfo.seed([969D140F0D87DDCD:49FDB5DEC6A0BE68]:0)
at org.apache.solr.SolrTestCaseJ4.assertQ(SolrTestCaseJ4.java:901)
at org.apache.solr.core.TestLazyCores.check10(TestLazyCores.java:847)
at 
org.apache.solr.core.TestLazyCores.testNoCommit(TestLazyCores.java:829)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at 

[jira] [Updated] (SOLR-3218) Range faceting support for CurrencyField

2018-01-10 Thread Hoss Man (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-3218?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Hoss Man updated SOLR-3218:
---
Attachment: SOLR-3218.patch

I added some more {{json.facet}} tests, and dug into the {{Calc.bitsToValue}} 
and {{Calc.bitsToSortableBits}} methods I couldn't make sense of before...

These methods exist solely for the use by {{FacetFieldProcessorByHashDV}} , and 
would never be triggered for Currency fields because 
{{FacetFieldProcessorByHashDV}} only uses these code paths if {{getNumberType() 
!= null}} ... so for the purposes of {{type:reange}} faceting and/or {{field: 
some_currency_field}} (regardless of the faceting type) these are in facet dead 
code.

I added some general javadocs clarifying these methods for the next reader, and 
replaced the {{RuntimeException("nocommit"))}} in the {{CurrencyCalc}} 
instances of these methods with {{SERVER_ERRORs}} that should help make it 
clear what's going wrong if someone breaks the code down the road.


I still want to write some more tests of some of the json.facet edge cases i'm 
not super familiar with to make sure i'm not missing anything, but this should 
resolve all of the open questions/confusion i had.


> Range faceting support for CurrencyField
> 
>
> Key: SOLR-3218
> URL: https://issues.apache.org/jira/browse/SOLR-3218
> Project: Solr
>  Issue Type: Improvement
>  Components: Schema and Analysis
>Reporter: Jan Høydahl
>Assignee: Hoss Man
> Attachments: SOLR-3218-1.patch, SOLR-3218-2.patch, SOLR-3218.patch, 
> SOLR-3218.patch, SOLR-3218.patch, SOLR-3218.patch, SOLR-3218.patch, 
> SOLR-3218.patch, SOLR-3218.patch, SOLR-3218.patch, SOLR-3218.patch, 
> SOLR-3218.patch
>
>
> Spinoff from SOLR-2202. Need to add range faceting capabilities for 
> CurrencyField



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS] Lucene-Solr-master-Windows (32bit/jdk1.8.0_144) - Build # 7106 - Still Unstable!

2018-01-10 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Windows/7106/
Java: 32bit/jdk1.8.0_144 -server -XX:+UseConcMarkSweepGC

5 tests failed.
FAILED:  
junit.framework.TestSuite.org.apache.lucene.search.TestSloppyPhraseQuery2

Error Message:
Could not remove the following files (in the order of attempts):
C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J1\temp\lucene.search.TestSloppyPhraseQuery2_CE5D44C9C22BBA54-001\index-MMapDirectory-001:
 java.nio.file.DirectoryNotEmptyException: 
C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J1\temp\lucene.search.TestSloppyPhraseQuery2_CE5D44C9C22BBA54-001\index-MMapDirectory-001

C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J1\temp\lucene.search.TestSloppyPhraseQuery2_CE5D44C9C22BBA54-001\index-MMapDirectory-001\segments_1:
 java.nio.file.AccessDeniedException: 
C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J1\temp\lucene.search.TestSloppyPhraseQuery2_CE5D44C9C22BBA54-001\index-MMapDirectory-001\segments_1

C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J1\temp\lucene.search.TestSloppyPhraseQuery2_CE5D44C9C22BBA54-001:
 java.nio.file.DirectoryNotEmptyException: 
C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J1\temp\lucene.search.TestSloppyPhraseQuery2_CE5D44C9C22BBA54-001
 

Stack Trace:
java.io.IOException: Could not remove the following files (in the order of 
attempts):
   
C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J1\temp\lucene.search.TestSloppyPhraseQuery2_CE5D44C9C22BBA54-001\index-MMapDirectory-001:
 java.nio.file.DirectoryNotEmptyException: 
C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J1\temp\lucene.search.TestSloppyPhraseQuery2_CE5D44C9C22BBA54-001\index-MMapDirectory-001
   
C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J1\temp\lucene.search.TestSloppyPhraseQuery2_CE5D44C9C22BBA54-001\index-MMapDirectory-001\segments_1:
 java.nio.file.AccessDeniedException: 
C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J1\temp\lucene.search.TestSloppyPhraseQuery2_CE5D44C9C22BBA54-001\index-MMapDirectory-001\segments_1
   
C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J1\temp\lucene.search.TestSloppyPhraseQuery2_CE5D44C9C22BBA54-001:
 java.nio.file.DirectoryNotEmptyException: 
C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J1\temp\lucene.search.TestSloppyPhraseQuery2_CE5D44C9C22BBA54-001

at __randomizedtesting.SeedInfo.seed([CE5D44C9C22BBA54]:0)
at org.apache.lucene.util.IOUtils.rm(IOUtils.java:329)
at 
org.apache.lucene.util.TestRuleTemporaryFilesCleanup.afterAlways(TestRuleTemporaryFilesCleanup.java:216)
at 
com.carrotsearch.randomizedtesting.rules.TestRuleAdapter$1.afterAlways(TestRuleAdapter.java:31)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:43)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.lang.Thread.run(Thread.java:748)


FAILED:  junit.framework.TestSuite.org.apache.solr.cloud.ShardRoutingCustomTest

Error Message:
Could not remove the following files (in the order of attempts):
C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\solr\build\solr-core\test\J1\temp\solr.cloud.ShardRoutingCustomTest_7B998C4FCEE38F5B-001\jetty-001\collection1\conf\xslt:
 java.nio.file.DirectoryNotEmptyException: 
C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\solr\build\solr-core\test\J1\temp\solr.cloud.ShardRoutingCustomTest_7B998C4FCEE38F5B-001\jetty-001\collection1\conf\xslt

C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\solr\build\solr-core\test\J1\temp\solr.cloud.ShardRoutingCustomTest_7B998C4FCEE38F5B-001\jetty-001\collection1\conf:
 java.nio.file.DirectoryNotEmptyException: 
C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\solr\build\solr-core\test\J1\temp\solr.cloud.ShardRoutingCustomTest_7B998C4FCEE38F5B-001\jetty-001\collection1\conf


[JENKINS] Lucene-Solr-master-Linux (64bit/jdk-9.0.1) - Build # 21255 - Still Unstable!

2018-01-10 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/21255/
Java: 64bit/jdk-9.0.1 -XX:-UseCompressedOops -XX:+UseConcMarkSweepGC

1 tests failed.
FAILED:  
org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessorTest.testParse

Error Message:
Error from server at https://127.0.0.1:40319/solr: Collection : 
myalias_2017-10-24 is part of alias myalias remove or modify the alias before 
removing this collection.

Stack Trace:
org.apache.solr.client.solrj.impl.HttpSolrClient$RemoteSolrException: Error 
from server at https://127.0.0.1:40319/solr: Collection : myalias_2017-10-24 is 
part of alias myalias remove or modify the alias before removing this 
collection.
at 
__randomizedtesting.SeedInfo.seed([4AFB2367C3409B28:257E683F7F36A8C7]:0)
at 
org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:643)
at 
org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:255)
at 
org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:244)
at 
org.apache.solr.client.solrj.impl.LBHttpSolrClient.doRequest(LBHttpSolrClient.java:483)
at 
org.apache.solr.client.solrj.impl.LBHttpSolrClient.request(LBHttpSolrClient.java:413)
at 
org.apache.solr.client.solrj.impl.CloudSolrClient.sendRequest(CloudSolrClient.java:1104)
at 
org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:884)
at 
org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:817)
at 
org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:194)
at 
org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:211)
at 
org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessorTest.doBefore(TimeRoutedAliasUpdateProcessorTest.java:84)
at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:564)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:968)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 

[jira] [Updated] (LUCENE-8127) BooleanQuery with needsScores=false, rewriteNoScoring improvement

2018-01-10 Thread Michael Braun (JIRA)

 [ 
https://issues.apache.org/jira/browse/LUCENE-8127?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Michael Braun updated LUCENE-8127:
--
Priority: Minor  (was: Major)

> BooleanQuery with needsScores=false, rewriteNoScoring improvement
> -
>
> Key: LUCENE-8127
> URL: https://issues.apache.org/jira/browse/LUCENE-8127
> Project: Lucene - Core
>  Issue Type: Improvement
>Reporter: Michael Braun
>Priority: Minor
>
> In the case needsScores=false, createWeight calls rewriteNoScoring before 
> creating a new BooleanWeight. This in all cases creates a new BooleanQuery, 
> even when it's not necessary (aka there are no MUST clauses). 
> The rewriteNoScoring method could check for something as simple as if 
> (clauseSets.get(Occur.MUST).size() > 0)  before creating a brand new 
> BooleanQuery. 



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Created] (LUCENE-8127) BooleanQuery with needsScores=false, rewriteNoScoring improvement

2018-01-10 Thread Michael Braun (JIRA)
Michael Braun created LUCENE-8127:
-

 Summary: BooleanQuery with needsScores=false, rewriteNoScoring 
improvement
 Key: LUCENE-8127
 URL: https://issues.apache.org/jira/browse/LUCENE-8127
 Project: Lucene - Core
  Issue Type: Improvement
Reporter: Michael Braun


In the case needsScores=false, createWeight calls rewriteNoScoring before 
creating a new BooleanWeight. This in all cases creates a new BooleanQuery, 
even when it's not necessary (aka there are no MUST clauses). 

The rewriteNoScoring method could check for something as simple as if 
(clauseSets.get(Occur.MUST).size() > 0)  before creating a brand new 
BooleanQuery. 



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-11722) API to create a Time Routed Alias and first collection

2018-01-10 Thread Gus Heck (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-11722?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321433#comment-16321433
 ] 

Gus Heck commented on SOLR-11722:
-

Hmm, I seem to have a reproducible intermittent failure in my test. I had seen 
this earlier, but it didn't look related and I didn't see it after updating, 
but now it's back... 

ant test  -Dtestcase=CreateRoutedAliasTest -Dtests.method=testV1 
-Dtests.seed=25994AC1D6CA0163 -Dtests.locale=th-TH-u-nu-thai-x-lvariant-TH 
-Dtests.timezone=Pacific/Johnston -Dtests.asserts=true 
-Dtests.file.encoding=UTF-8

leads to:
{code}
   [junit4] ERROR   0.16s | CreateRoutedAliasTest.testV1 <<<
   [junit4]> Throwable #1: javax.net.ssl.SSLHandshakeException: 
sun.security.validator.ValidatorException: PKIX path building failed: 
sun.security.provider.certpath.SunCertPathBuilderException: unable to find 
valid certification path to requested target
   [junit4]>at 
__randomizedtesting.SeedInfo.seed([25994AC1D6CA0163:8A578DB19727ADF3]:0)
   [junit4]>at 
sun.security.ssl.Alerts.getSSLException(Alerts.java:192)
   [junit4]>at 
sun.security.ssl.SSLSocketImpl.fatal(SSLSocketImpl.java:1959)
   [junit4]>at 
sun.security.ssl.Handshaker.fatalSE(Handshaker.java:302)
   [junit4]>at 
sun.security.ssl.Handshaker.fatalSE(Handshaker.java:296)
   [junit4]>at 
sun.security.ssl.ClientHandshaker.serverCertificate(ClientHandshaker.java:1514)
   [junit4]>at 
sun.security.ssl.ClientHandshaker.processMessage(ClientHandshaker.java:216)
   [junit4]>at 
sun.security.ssl.Handshaker.processLoop(Handshaker.java:1026)
   [junit4]>at 
sun.security.ssl.Handshaker.process_record(Handshaker.java:961)
   [junit4]>at 
sun.security.ssl.SSLSocketImpl.readRecord(SSLSocketImpl.java:1072)
   [junit4]>at 
sun.security.ssl.SSLSocketImpl.performInitialHandshake(SSLSocketImpl.java:1385)
   [junit4]>at 
sun.security.ssl.SSLSocketImpl.startHandshake(SSLSocketImpl.java:1413)
   [junit4]>at 
sun.security.ssl.SSLSocketImpl.startHandshake(SSLSocketImpl.java:1397)
   [junit4]>at 
org.apache.http.conn.ssl.SSLConnectionSocketFactory.createLayeredSocket(SSLConnectionSocketFactory.java:396)
   [junit4]>at 
org.apache.http.conn.ssl.SSLConnectionSocketFactory.connectSocket(SSLConnectionSocketFactory.java:355)
   [junit4]>at 
org.apache.http.impl.conn.DefaultHttpClientConnectionOperator.connect(DefaultHttpClientConnectionOperator.java:142)
   [junit4]>at 
org.apache.http.impl.conn.PoolingHttpClientConnectionManager.connect(PoolingHttpClientConnectionManager.java:359)
   [junit4]>at 
org.apache.http.impl.execchain.MainClientExec.establishRoute(MainClientExec.java:381)
   [junit4]>at 
org.apache.http.impl.execchain.MainClientExec.execute(MainClientExec.java:237)
   [junit4]>at 
org.apache.http.impl.execchain.ProtocolExec.execute(ProtocolExec.java:185)
   [junit4]>at 
org.apache.http.impl.execchain.RetryExec.execute(RetryExec.java:89)
   [junit4]>at 
org.apache.http.impl.execchain.RedirectExec.execute(RedirectExec.java:111)
   [junit4]>at 
org.apache.http.impl.client.InternalHttpClient.doExecute(InternalHttpClient.java:185)
   [junit4]>at 
org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:83)
   [junit4]>at 
org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:108)
   [junit4]>at 
org.apache.solr.cloud.CreateRoutedAliasTest.testV1(CreateRoutedAliasTest.java:164)
   [junit4]>at java.lang.Thread.run(Thread.java:748)
   [junit4]> Caused by: sun.security.validator.ValidatorException: PKIX 
path building failed: 
sun.security.provider.certpath.SunCertPathBuilderException: unable to find 
valid certification path to requested target
   [junit4]>at 
sun.security.validator.PKIXValidator.doBuild(PKIXValidator.java:397)
   [junit4]>at 
sun.security.validator.PKIXValidator.engineValidate(PKIXValidator.java:302)
   [junit4]>at 
sun.security.validator.Validator.validate(Validator.java:260)
   [junit4]>at 
sun.security.ssl.X509TrustManagerImpl.validate(X509TrustManagerImpl.java:324)
   [junit4]>at 
sun.security.ssl.X509TrustManagerImpl.checkTrusted(X509TrustManagerImpl.java:229)
   [junit4]>at 
sun.security.ssl.X509TrustManagerImpl.checkServerTrusted(X509TrustManagerImpl.java:124)
   [junit4]>at 
sun.security.ssl.ClientHandshaker.serverCertificate(ClientHandshaker.java:1496)
   [junit4]>... 59 more
   [junit4]> Caused by: 
sun.security.provider.certpath.SunCertPathBuilderException: unable to find 
valid certification path to requested target
   [junit4]>at 

[jira] [Commented] (SOLR-11840) Inconsistencies in the Usage Messages of bin/solr.cmd

2018-01-10 Thread Jakob Furrer (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-11840?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321330#comment-16321330
 ] 

Jakob Furrer commented on SOLR-11840:
-

* The description of _solr_ _assert_ has an incorrect usage message
The usage message should describe the syntax for the command, instead it says 
*usage:* *org.apache.solr.util.SolrCLI*.

{code:bgColor=#D3D3D3}
C:\solr-7.2.0_test\bin>solr.cmd assert -help
usage: org.apache.solr.util.SolrCLI
 -e,--exitcode Return an exit code instead of printing
   error message on assert fail.
 -help Print this message
 -m,--message Exception message to be used in place of
   the default error message
 -R,--not-root Asserts that we are NOT the root user
 -r,--root Asserts that we are the root user
 -S,--not-started Asserts that Solr is NOT running on a
   certain URL. Default timeout is 1000ms
 -s,--started Asserts that Solr is running on a certain
   URL. Default timeout is 1000ms
 -t,--timeout  Timeout in ms for commands supporting a
   timeout
 -u,--same-user Asserts that we run as same user that owns
   
 -verbose  Generate verbose log messages
 -x,--existsAsserts that directory  exists
 -X,--not-existsAsserts that directory  does NOT
   exist

C:\solr-7.2.0_test\bin>
{code}


There is no label ':assert_usage' in _bin/solr.cmd_.
Apparently the help text for _>solr.cmd_ _assert_ _-help_ instead comes from 
the Java implementation (see _SolrCLI.java_ line 3398).

> Inconsistencies in the Usage Messages of bin/solr.cmd
> -
>
> Key: SOLR-11840
> URL: https://issues.apache.org/jira/browse/SOLR-11840
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>Affects Versions: 7.2
>Reporter: Jakob Furrer
>  Labels: documentation, easyfix
> Fix For: 7.2
>
> Attachments: solr.cmd.txt, solr.txt, solr_start_help_Syntaxfehler.png
>
>   Original Estimate: 2h
>  Remaining Estimate: 2h
>
> I noticed a number of errors/problems/peculiarities in the Usage Messages 
> that are displayed when using *bin/solr.cmd* with the parameter *_-help_*.
> The items are listed in no particular order and may be addressed 
> independently.
> To spot the differences between the Usage Messages of _bin/solr_ and 
> _bin/solr.cmd_ I compiled an extract of the Usage Messages of the two files 
> so that they can be compared using WinMerge or a similar diff tool.
> See the attached files *solr.cmd.txt* and *solr.txt*.
> Note that I work on a German Windows 10, therefore some error messages I 
> quote here are in German.
> # _solr_ _start_ _-help_ results in a syntax error
> The special characters '<' and '>' are not escaped.
> The line 314 must be changed as follows:
> {noformat}
> CURRENT : ... the default server/
> SHALL_BE: ... the default server/^
> {noformat}
> \\
> # _solr auth -help_ ends up empty
> A goto label ':auth_usage' with the appropriate Usage Messages already exists.
> At line 266 an additional if-statement is required.
> Also, a respective if-statement will be required on line 1858.
> {noformat}
> NEW_CODE: IF "%SCRIPT_CMD%"=="auth" goto auth_usage
> {noformat}
> Some additional bugs in the section ':auth_usage' must then also be addressed.
> The special character '|' is not escaped at a number of locations.
> The lines 568, 569, 570, 577, 580 and 585 must be changed, e.g.
> {noformat}
> CURRENT : echo Usage: solr auth enable [-type basicAuth] -credentials 
> user:pass [-blockUnknown ^] [-updateIncludeFileOnly 
> ^] [-V]
> SHALL_BE: echo Usage: solr auth enable [-type basicAuth] -credentials 
> user:pass [-blockUnknown ^] [-updateIncludeFileOnly 
> ^] [-V]
> {noformat}
> The empty 'echo' statement (i.e. 'newline') needs to be written with a dot 
> ('echo.') to avoid "ECHO ist ausgeschaltet (OFF)." statements.
> The lines 571, 573, 576, 577, 579, 584, 587, 589, 591, 594 and 596 must be 
> changed:
> {noformat}
> CURRENT : echo
> SHALL_BE: echo.
> {noformat}
> \\
> # _solr_ _-help_ does not mention the command _status_
> The line 271 must be changed as follows:
> {noformat}
> CURRENT : @echowhere COMMAND is one of: start, stop, restart, 
> healthcheck, create, create_core, create_collection, delete, version, zk, 
> auth, assert
> SHALL_BE: @echowhere COMMAND is one of: start, stop, restart, status, 
> healthcheck, create, create_core, create_collection, delete, version, zk, 
> auth, 

[jira] [Updated] (SOLR-5102) Simplify Solr Home

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-5102?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-5102:

Fix Version/s: (was: 6.0)
   Issue Type: Improvement  (was: Bug)

> Simplify Solr Home
> --
>
> Key: SOLR-5102
> URL: https://issues.apache.org/jira/browse/SOLR-5102
> Project: Solr
>  Issue Type: Improvement
>Reporter: Grant Ingersoll
>Assignee: Grant Ingersoll
>
> I think for 5.0, we should re-think some of the variations we support around 
> things like Solr Home, etc.  We have a fair bit of code, I suspect that could 
> just go away if we make it easier by assuming there is a single solr home where 
> everything lives.  The notion of making that stuff configurable has outlived 
> its usefulness



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-5046) IllegalArgumentException using distributed group.query when one shard does not match any docs

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-5046?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-5046:

Component/s: search

> IllegalArgumentException using distributed group.query when one shard does 
> not match any docs
> -
>
> Key: SOLR-5046
> URL: https://issues.apache.org/jira/browse/SOLR-5046
> Project: Solr
>  Issue Type: Bug
>  Components: search
>Affects Versions: 4.3
>Reporter: Hoss Man
> Attachments: 
> 0001-Get-distributed-grouping-request-work-with-sort-with.patch, 
> 0002-Get-distributed-grouping-request-work-with-sort-with.patch
>
>
> [Evgeny Salnikov noted this problem on the mailing 
> list|http://mail-archives.apache.org/mod_mbox/lucene-solr-user/201307.mbox/%3CCADz7Cx6PbMxExhb8gsCu9%3DP6nphJd2fYayov_%3D%3D%2Bo1sEXswWLw%40mail.gmail.com%3E],
>  although the initial report was somewhat convoluted by suspicious 
> description of adding shards after the fact.
> Steps to reproduce using 4.3.1 example...
> * startup a 2 node SolrCloud cluster following the "Example A" description on 
> the SolrCloud wiki...
> ** cp example example2
> ** cd example && java -Dbootstrap_confdir=./solr/collection1/conf 
> -Dcollection.configName=myconf -DzkRun -DnumShards=2 -jar start.jar
> ** cd example2 && java -Djetty.port=7574 -DzkHost=localhost:9983 -jar 
> start.jar
> * index exactly one doc (to ensure that subsequent distributed queries get 
> results from only one node)
> ** java -jar post.jar utf8-example.xml
> * execute a request using group.query
> ** http://localhost:7574/solr/select?q=*:*=true=cat:software
> stack trace...
> {noformat}
> 166500 [qtp2092063645-19] ERROR org.apache.solr.servlet.SolrDispatchFilter  – 
> null:java.lang.IllegalArgumentException: shard 1 did not set sort field 
> values (FieldDoc.fields is null); you must pass fillFields=true to 
> IndexSearcher.search on each shard
>   at 
> org.apache.lucene.search.TopDocs$MergeSortQueue.(TopDocs.java:143)
>   at org.apache.lucene.search.TopDocs.merge(TopDocs.java:214)
>   at 
> org.apache.solr.search.grouping.distributed.responseprocessor.TopGroupsShardResponseProcessor.process(TopGroupsShardResponseProcessor.java:114)
>   at 
> org.apache.solr.handler.component.QueryComponent.handleGroupedResponses(QueryComponent.java:619)
>   at 
> org.apache.solr.handler.component.QueryComponent.handleResponses(QueryComponent.java:602)
>   at 
> org.apache.solr.handler.component.SearchHandler.handleRequestBody(SearchHandler.java:311)
>   at 
> org.apache.solr.handler.RequestHandlerBase.handleRequest(RequestHandlerBase.java:135)
>   at org.apache.solr.core.SolrCore.execute(SolrCore.java:1816)
> {noformat}



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-4989) Implement show=ALL in LukeRequestHandler

2018-01-10 Thread Cassandra Targett (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-4989?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321301#comment-16321301
 ] 

Cassandra Targett commented on SOLR-4989:
-

I'm not sure it's a bug, but I can confirm that {{show=all}} still doesn't work 
although I can see in {{LukeRequestHandler.java}} that {{all}} is one of the 
accepted values for {{show}}.

> Implement show=ALL in LukeRequestHandler
> 
>
> Key: SOLR-4989
> URL: https://issues.apache.org/jira/browse/SOLR-4989
> Project: Solr
>  Issue Type: Bug
>Affects Versions: 4.3.1
>Reporter: Adam Hahn
>
> There are currently 4 options for the "show" field defined in the 
> LukeRequestHandler: schema, index, doc, and all.  Schema, doc, and index are 
> implemented, but "all" is not.  My thought is that show=all would populate 
> the "index", "fields", and "schema" section of the response.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-4996) CloudSolrServer Does Not Respect Propagate Requests

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4996?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4996:

Component/s: SolrCloud

> CloudSolrServer Does Not Respect Propagate Requests
> ---
>
> Key: SOLR-4996
> URL: https://issues.apache.org/jira/browse/SOLR-4996
> Project: Solr
>  Issue Type: Bug
>  Components: SolrCloud
>Reporter: Furkan KAMACI
> Attachments: SOLR-4996.patch
>
>
> When using CloudSolrServer if you make a request as like LukeRequest it uses 
> LBHttpSolrServer internally and it sends request to just one Solr Node (via 
> HttpSolrServer) as round robin. So you may get different results for same 
> requests at different times even if nothing changes. Using a PropagateServer 
> inside CloudSolrServer will fix that bug.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-11722) API to create a Time Routed Alias and first collection

2018-01-10 Thread Gus Heck (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-11722?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321275#comment-16321275
 ] 

Gus Heck commented on SOLR-11722:
-

Attached a patch; I believe I addressed the above comments. I did switch to 
using LocalSolrRequest, but that did not alleviate the need for the v2-v1 and 
parseCollectionCreationProps changes, because they get utilized in the _OP enum 
to massage the incoming parameters before requests can be made, rather than in 
the Cmd class where delegation to the Command infrastructure via 
LocalSolrRequest happens.

Names should be mostly in sync with your changes in SOLR-11653, and I extracted 
your collection creation code into createCollectionAndWait() and reused that 
(which is how I picked up LocalSolrRequest :) ).

What's not in this patch is documentation, but I want to get this out where you 
and others can look at it. Working on docs now, which could be added as an 
independent patch if you want to commit this one.


> API to create a Time Routed Alias and first collection
> --
>
> Key: SOLR-11722
> URL: https://issues.apache.org/jira/browse/SOLR-11722
> Project: Solr
>  Issue Type: Sub-task
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: SolrCloud
>Reporter: David Smiley
> Attachments: SOLR-11722.patch, SOLR-11722.patch
>
>
> This issue is about creating a single API command to create a "Time Routed 
> Alias" along with its first collection.  Need to decide what endpoint URL it 
> is and parameters.
> Perhaps in v2 it'd be {{/api/collections?command=create-routed-alias}} or 
> alternatively piggy-back off of command=create-alias but we add more options, 
> perhaps with a prefix like "router"?
> Inputs:
> * alias name
> * misc collection creation metadata (e.g. config, numShards, ...) perhaps in 
> this context with a prefix like "collection."
> * metadata for TimeRoutedAliasUpdateProcessor, currently: router.field
> * date specifier for first collection; can include "date math".
> We'll certainly add more options as future features unfold.
> I believe the collection needs to be created first (referring to the alias 
> name via a core property), and then the alias pointing to it which demands 
> collections exist first.  When figuring the collection name, you'll need to 
> reference the format in TimeRoutedAliasUpdateProcessor.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-4944) ChaosMonkeyShardSplitTest does not actually kill the overseer

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4944?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4944:

Component/s: Tests

> ChaosMonkeyShardSplitTest does not actually kill the overseer
> -
>
> Key: SOLR-4944
> URL: https://issues.apache.org/jira/browse/SOLR-4944
> Project: Solr
>  Issue Type: Bug
>  Components: Tests
>Reporter: Shalin Shekhar Mangar
>
> ChaosMonkeyShardSplitTest does not actually kill the overseer so it is not 
> very useful in testing splits in failure scenarios.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-11718) Deprecate CDCR Buffer APIs

2018-01-10 Thread Varun Thacker (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-11718?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321269#comment-16321269
 ] 

Varun Thacker commented on SOLR-11718:
--

Hi Amrit,

Thanks for picking up this Jira

Few questions while looking at the patch

1. In CdcrRequestHandlerTest#testCheckpointActions why have the asserts been 
commented out?
2. "Since the CdcrReplicationHandlerTest was failing, suggesting typical Index 
Replication will take place when followers are numRecordsToKeep count behind." 
- Maybe we should modify the test to assert document count instead of just 
commenting it out?
3. I don't quite understand the doc changes - "ENABLEBUFFER API has been 
deprecated in favor of when buffering is enabled, the Update Logs will grow 
without limit; they will never be purged."

> Deprecate CDCR Buffer APIs
> --
>
> Key: SOLR-11718
> URL: https://issues.apache.org/jira/browse/SOLR-11718
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: CDCR
>Affects Versions: 7.1
>Reporter: Amrit Sarkar
> Fix For: master (8.0), 7.3
>
> Attachments: SOLR-11718.patch, SOLR-11718.patch
>
>
> Kindly see the discussion on SOLR-11652.
> Today, if we see the current CDCR documentation page, buffering is "disabled" 
> by default in both source and target. We don't see any purpose served by Cdcr 
> buffering and it is quite an overhead considering it can take a lot heap 
> space (tlogs ptr) and forever retention of tlogs on the disk when enabled. 
> Also today, even if we disable buffer from API on source , considering it was 
> enabled at startup, tlogs are never purged on leader node of shards of 
> source, refer jira: SOLR-11652



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-11624) collection creation should not also overwrite/delete any configset but it can!

2018-01-10 Thread Ishan Chattopadhyaya (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-11624?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321260#comment-16321260
 ] 

Ishan Chattopadhyaya commented on SOLR-11624:
-

[~erickerickson], I'll review the test and the documentation change and try to 
wrap this up this week.

> collection creation should not also overwrite/delete any configset but it can!
> --
>
> Key: SOLR-11624
> URL: https://issues.apache.org/jira/browse/SOLR-11624
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>Affects Versions: 7.2
>Reporter: Erick Erickson
>Assignee: Ishan Chattopadhyaya
> Attachments: SOLR-11624-2.patch, SOLR-11624.3.patch, 
> SOLR-11624.4.patch, SOLR-11624.patch, SOLR-11624.patch
>
>
> Looks like a problem that crept in when we changed the _default configset 
> stuff.
> setup:
> upload a configset named "wiki"
> collections?action=CREATE=wiki&.
> My custom configset "wiki" gets overwritten by _default and then used by the 
> "wiki" collection.
> Assigning to myself only because it really needs to be fixed IMO and I don't 
> want to lose track of it. Anyone else please feel free to take it.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-11722) API to create a Time Routed Alias and first collection

2018-01-10 Thread Gus Heck (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-11722?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Gus Heck updated SOLR-11722:

Attachment: SOLR-11722.patch

> API to create a Time Routed Alias and first collection
> --
>
> Key: SOLR-11722
> URL: https://issues.apache.org/jira/browse/SOLR-11722
> Project: Solr
>  Issue Type: Sub-task
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: SolrCloud
>Reporter: David Smiley
> Attachments: SOLR-11722.patch, SOLR-11722.patch
>
>
> This issue is about creating a single API command to create a "Time Routed 
> Alias" along with its first collection.  Need to decide what endpoint URL it 
> is and parameters.
> Perhaps in v2 it'd be {{/api/collections?command=create-routed-alias}} or 
> alternatively piggy-back off of command=create-alias but we add more options, 
> perhaps with a prefix like "router"?
> Inputs:
> * alias name
> * misc collection creation metadata (e.g. config, numShards, ...) perhaps in 
> this context with a prefix like "collection."
> * metadata for TimeRoutedAliasUpdateProcessor, currently: router.field
> * date specifier for first collection; can include "date math".
> We'll certainly add more options as future features unfold.
> I believe the collection needs to be created first (referring to the alias 
> name via a core property), and then the alias pointing to it which demands 
> collections exist first.  When figuring the collection name, you'll need to 
> reference the format in TimeRoutedAliasUpdateProcessor.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS] Lucene-Solr-NightlyTests-7.x - Build # 117 - Still Unstable

2018-01-10 Thread Apache Jenkins Server
Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-7.x/117/

3 tests failed.
FAILED:  
org.apache.solr.handler.TestReplicationHandler.doTestReplicateAfterCoreReload

Error Message:
expected:<[{indexVersion=1515617123857,generation=2,filelist=[_6o.cfe, _6o.cfs, 
_6o.si, _79.cfe, _79.cfs, _79.si, _7b.cfe, _7b.cfs, _7b.si, _7c.cfe, _7c.cfs, 
_7c.si, _7d.cfe, _7d.cfs, _7d.si, _7e.cfe, _7e.cfs, _7e.si, _7f.cfe, _7f.cfs, 
_7f.si, _7g.cfe, _7g.cfs, _7g.si, _7h.cfe, _7h.cfs, _7h.si, _7i.cfe, _7i.cfs, 
_7i.si, _7j.cfe, _7j.cfs, _7j.si, _7k.cfe, _7k.cfs, _7k.si, _7l.cfe, _7l.cfs, 
_7l.si, _7m.cfe, _7m.cfs, _7m.si, _7n.cfe, _7n.cfs, _7n.si, _7o.cfe, _7o.cfs, 
_7o.si, _7q.cfe, _7q.cfs, _7q.si, _7t.cfe, _7t.cfs, _7t.si, _7v.cfe, _7v.cfs, 
_7v.si, _7w.cfe, _7w.cfs, _7w.si, _7x.cfe, _7x.cfs, _7x.si, segments_2]}]> but 
was:<[{indexVersion=1515617123857,generation=2,filelist=[_6o.cfe, _6o.cfs, 
_6o.si, _79.cfe, _79.cfs, _79.si, _7b.cfe, _7b.cfs, _7b.si, _7c.cfe, _7c.cfs, 
_7c.si, _7d.cfe, _7d.cfs, _7d.si, _7e.cfe, _7e.cfs, _7e.si, _7f.cfe, _7f.cfs, 
_7f.si, _7g.cfe, _7g.cfs, _7g.si, _7h.cfe, _7h.cfs, _7h.si, _7i.cfe, _7i.cfs, 
_7i.si, _7j.cfe, _7j.cfs, _7j.si, _7k.cfe, _7k.cfs, _7k.si, _7l.cfe, _7l.cfs, 
_7l.si, _7m.cfe, _7m.cfs, _7m.si, _7n.cfe, _7n.cfs, _7n.si, _7o.cfe, _7o.cfs, 
_7o.si, _7q.cfe, _7q.cfs, _7q.si, _7t.cfe, _7t.cfs, _7t.si, _7v.cfe, _7v.cfs, 
_7v.si, _7w.cfe, _7w.cfs, _7w.si, _7x.cfe, _7x.cfs, _7x.si, segments_2]}, 
{indexVersion=1515617123857,generation=3,filelist=[_7s.cfe, _7s.cfs, _7s.si, 
_7t.cfe, _7t.cfs, _7t.si, _7v.cfe, _7v.cfs, _7v.si, _7w.cfe, _7w.cfs, _7w.si, 
_7x.cfe, _7x.cfs, _7x.si, segments_3]}]>

Stack Trace:
java.lang.AssertionError: 
expected:<[{indexVersion=1515617123857,generation=2,filelist=[_6o.cfe, _6o.cfs, 
_6o.si, _79.cfe, _79.cfs, _79.si, _7b.cfe, _7b.cfs, _7b.si, _7c.cfe, _7c.cfs, 
_7c.si, _7d.cfe, _7d.cfs, _7d.si, _7e.cfe, _7e.cfs, _7e.si, _7f.cfe, _7f.cfs, 
_7f.si, _7g.cfe, _7g.cfs, _7g.si, _7h.cfe, _7h.cfs, _7h.si, _7i.cfe, _7i.cfs, 
_7i.si, _7j.cfe, _7j.cfs, _7j.si, _7k.cfe, _7k.cfs, _7k.si, _7l.cfe, _7l.cfs, 
_7l.si, _7m.cfe, _7m.cfs, _7m.si, _7n.cfe, _7n.cfs, _7n.si, _7o.cfe, _7o.cfs, 
_7o.si, _7q.cfe, _7q.cfs, _7q.si, _7t.cfe, _7t.cfs, _7t.si, _7v.cfe, _7v.cfs, 
_7v.si, _7w.cfe, _7w.cfs, _7w.si, _7x.cfe, _7x.cfs, _7x.si, segments_2]}]> but 
was:<[{indexVersion=1515617123857,generation=2,filelist=[_6o.cfe, _6o.cfs, 
_6o.si, _79.cfe, _79.cfs, _79.si, _7b.cfe, _7b.cfs, _7b.si, _7c.cfe, _7c.cfs, 
_7c.si, _7d.cfe, _7d.cfs, _7d.si, _7e.cfe, _7e.cfs, _7e.si, _7f.cfe, _7f.cfs, 
_7f.si, _7g.cfe, _7g.cfs, _7g.si, _7h.cfe, _7h.cfs, _7h.si, _7i.cfe, _7i.cfs, 
_7i.si, _7j.cfe, _7j.cfs, _7j.si, _7k.cfe, _7k.cfs, _7k.si, _7l.cfe, _7l.cfs, 
_7l.si, _7m.cfe, _7m.cfs, _7m.si, _7n.cfe, _7n.cfs, _7n.si, _7o.cfe, _7o.cfs, 
_7o.si, _7q.cfe, _7q.cfs, _7q.si, _7t.cfe, _7t.cfs, _7t.si, _7v.cfe, _7v.cfs, 
_7v.si, _7w.cfe, _7w.cfs, _7w.si, _7x.cfe, _7x.cfs, _7x.si, segments_2]}, 
{indexVersion=1515617123857,generation=3,filelist=[_7s.cfe, _7s.cfs, _7s.si, 
_7t.cfe, _7t.cfs, _7t.si, _7v.cfe, _7v.cfs, _7v.si, _7w.cfe, _7w.cfs, _7w.si, 
_7x.cfe, _7x.cfs, _7x.si, segments_3]}]>
at 
__randomizedtesting.SeedInfo.seed([AB624E6554A83B1:2F613FD625028DB2]:0)
at org.junit.Assert.fail(Assert.java:93)
at org.junit.Assert.failNotEquals(Assert.java:647)
at org.junit.Assert.assertEquals(Assert.java:128)
at org.junit.Assert.assertEquals(Assert.java:147)
at 
org.apache.solr.handler.TestReplicationHandler.doTestReplicateAfterCoreReload(TestReplicationHandler.java:1277)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 

[jira] [Updated] (SOLR-11815) TLOG leaders going down and rejoining as a replica do fullCopy when not needed

2018-01-10 Thread Ishan Chattopadhyaya (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-11815?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Ishan Chattopadhyaya updated SOLR-11815:

Attachment: SOLR-11815.patch

Adding a WIP patch that mitigates this issue. In the event of a full copy, I'm 
doing a differential copy: download whatever is new/different, but re-use the 
files that are the same. It doesn't have tests, and is brittle right now (naive 
class cast).

> TLOG leaders going down and rejoining as a replica do fullCopy when not needed
> --
>
> Key: SOLR-11815
> URL: https://issues.apache.org/jira/browse/SOLR-11815
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: replication (java)
>Affects Versions: 7.2
> Environment: Oracle JDK 1.8
> Ubuntu 16.04
>Reporter: Shaun Sabo
>Assignee: Ishan Chattopadhyaya
> Attachments: SOLR-11815.patch
>
>
> I am running a collection with a persistent high volume of writes. When the 
> leader goes down and recovers, it joins as a replica and asks the new leader 
> for the files to Sync. The isIndexStale check is finding that some files 
> differ in size and checksum which forces a fullCopy. Since our indexes are 
> rather large, a rolling restart is resulting in large amounts of data 
> transfer, and in some cases disk space contention issues.
> I do not believe the fullCopy is necessary given the circumstances. 
> Repro Steps:
> 1. collection/shard with 1 leader and 1 replica are accepting writes
> - Pull interval is 30 seconds
> - Hard Commit interval is 60 seconds
> 2. Replica executes an index pull and completes. 
> 3. Leader process Hard Commits (replica index is delayed)
> 4. leader process is killed (SIGTERM)
> 5. Replica takes over as new leader
> 6. New leader applies TLOG since last pull (cores are binary-divergent now)
> 7. Former leader comes back as New Replica
> 8. New replica initiates recovery
> - Recovery detects that the generation and version are behind and a check 
> is necessary
> 9. isIndexStale() detects that a segment exists on both the New Replica and 
> New Leader but that the size and checksum differ. 
> - This triggers fullCopy to be flagged on
> 10. Entirety of index is pulled regardless of changes
> The majority of files should not have changes, but everything gets pulled 
> because of the first file it finds with a mismatched checksum. 
> Relevant Code:
> https://github.com/apache/lucene-solr/blob/master/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java#L516-L518
> https://github.com/apache/lucene-solr/blob/master/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java#L1105-L1126



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-11832) Restore from backup creates old format collections

2018-01-10 Thread Noble Paul (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-11832?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321247#comment-16321247
 ] 

Noble Paul commented on SOLR-11832:
---

Going forward we should only use per collection state.json . The monolithic 
clusterstate.json is largely there for backcompat

> Restore from backup creates old format collections
> --
>
> Key: SOLR-11832
> URL: https://issues.apache.org/jira/browse/SOLR-11832
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: Backup/Restore
>Affects Versions: 7.2, 6.6.2
>Reporter: Tim Owen
>Assignee: Varun Thacker
>Priority: Minor
> Attachments: SOLR-11832.patch
>
>
> Restoring a collection from a backup always creates the new collection using 
> the old format state json (format 1), as a global clusterstate.json file at 
> top level of ZK. All new collections should be defaulting to use the newer 
> per-collection (format 2) in /collections/.../state.json
> As we're running clusters with many collections, the old global state format 
> isn't good for us, so as a workaround for now we're calling 
> MIGRATESTATEFORMAT immediately after the RESTORE call.
> This bug was mentioned in the comments of SOLR-5750 and also recently 
> mentioned by [~varunthacker] in SOLR-11560
> Code patch attached, but as per [~dsmiley]'s comment in the code, fixing this 
> means at least 1 test class doesn't succeed anymore. From what I can tell, 
> the BasicDistributedZk2Test fails because it's not using the official 
> collection API to create a collection, it seems to be bypassing that and 
> manually creating cores using the core admin api instead, which I think is 
> not enough to ensure the correct ZK nodes are created. The test superclass 
> has some methods to create a collection which do use the collection api so I 
> could try fixing the tests (I'm just not that familiar with why those 
> BasicDistributed*Test classes aren't using the collection api).



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-11838) explore supporting Deeplearning4j NeuralNetwork models in contrib/ltr

2018-01-10 Thread Jeroen Steggink (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-11838?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321244#comment-16321244
 ] 

Jeroen Steggink commented on SOLR-11838:


That's nice guys! I would like to help on this as well. I have lots of 
experience with Solr and Deeplearning4j. I already played with using 
Deeplearning4j in the Solr streaming API for applying models just like Gus 
mentioned. 

Any ideas on what kind of models should be supported or what kind of data it 
should use as input? For example, we can have bag-of-words and/or features in simple 
feed-forward networks, or sequence data with LSTMs, which require differently 
formatted inputs.
Furthermore, I can imagine users would like to have some influence on what kind 
of data will be used, sparse vectors, word vectors, etc, as these can have a 
huge impact on the performance of the network in both results and the system 
itself.

There's a lot of things to explore here.

Personally I would like to have something which is easy to use which increases 
the adoption of this feature. We could build further on the experiences of 
users.



> explore supporting Deeplearning4j NeuralNetwork models in contrib/ltr
> -
>
> Key: SOLR-11838
> URL: https://issues.apache.org/jira/browse/SOLR-11838
> Project: Solr
>  Issue Type: New Feature
>  Components: contrib - LTR
>Reporter: Christine Poerschke
> Attachments: SOLR-11838.patch
>
>
> [~yuyano] wrote in SOLR-11597:
> bq. ... If we think to apply this to more complex neural networks in the 
> future, we will need to support layers ...
> [~malcorn_redhat] wrote in SOLR-11597:
> bq. ... In my opinion, if this is a route Solr eventually wants to go, I 
> think a better strategy would be to just add a dependency on 
> [Deeplearning4j|https://deeplearning4j.org/] ...
> Creating this ticket for the idea to be explored further (if anyone is 
> interested in exploring it), complementary to and independent of the 
> SOLR-11597 RankNet related effort.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-11832) Restore from backup creates old format collections

2018-01-10 Thread Varun Thacker (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-11832?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321236#comment-16321236
 ] 

Varun Thacker commented on SOLR-11832:
--

I didn't hear back from [~noble.paul]  on 
https://issues.apache.org/jira/browse/SOLR-11586?focusedCommentId=16227436=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-16227436
 hence I didn't change the default ( which the patch you attached does ) 

I'm +1 to changing the default but we can do that in another Jira. I think we 
should close this out as a duplicate of SOLR-11586

> Restore from backup creates old format collections
> --
>
> Key: SOLR-11832
> URL: https://issues.apache.org/jira/browse/SOLR-11832
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: Backup/Restore
>Affects Versions: 7.2, 6.6.2
>Reporter: Tim Owen
>Assignee: Varun Thacker
>Priority: Minor
> Attachments: SOLR-11832.patch
>
>
> Restoring a collection from a backup always creates the new collection using 
> the old format state json (format 1), as a global clusterstate.json file at 
> top level of ZK. All new collections should be defaulting to use the newer 
> per-collection (format 2) in /collections/.../state.json
> As we're running clusters with many collections, the old global state format 
> isn't good for us, so as a workaround for now we're calling 
> MIGRATESTATEFORMAT immediately after the RESTORE call.
> This bug was mentioned in the comments of SOLR-5750 and also recently 
> mentioned by [~varunthacker] in SOLR-11560
> Code patch attached, but as per [~dsmiley]'s comment in the code, fixing this 
> means at least 1 test class doesn't succeed anymore. From what I can tell, 
> the BasicDistributedZk2Test fails because it's not using the official 
> collection API to create a collection, it seems to be bypassing that and 
> manually creating cores using the core admin api instead, which I think is 
> not enough to ensure the correct ZK nodes are created. The test superclass 
> has some methods to create a collection which do use the collection api so I 
> could try fixing the tests (I'm just not that familiar with why those 
> BasicDistributed*Test classes aren't using the collection api).



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-4866) FieldCache insanity when field is used in both faceting and grouping in distributed search (distributed grouping uses SortedDocValues)

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4866?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4866:

Component/s: search

> FieldCache insanity when field is used in both faceting and grouping in 
> distributed search (distributed grouping uses SortedDocValues)
> --
>
> Key: SOLR-4866
> URL: https://issues.apache.org/jira/browse/SOLR-4866
> Project: Solr
>  Issue Type: Bug
>  Components: search
>Reporter: Sannier Elodie
>Priority: Minor
>
> Faceting on a fieldX, either single node or distributed, uses the FieldType 
> of fieldX to fetch a type based array of field values.  Grouping on fieldX 
> uses the same type based arrays in single node solr instances -- but when 
> using distributed grouping, the multipass grouping logic uses SortedDocValues 
> from the FieldCache for fieldX, resulting in "field cache insanity" if you 
> also facet on this field, or execute a query against a single shard.
> This discrepancy can be observed in the example configs by executing a simple 
> grouping query, and then also executing a distributed grouping query...
> http://localhost:8983/solr/select?q=*:*=true=popularity
> http://localhost:8983/solr/select?q=*:*=true=popularity=localhost:8983/solr
> http://localhost:8983/solr/admin/mbeans?stats=true=fieldCache
> Background: http://markmail.org/thread/7gctyh6vn3eq5jso



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-11832) Restore from backup creates old format collections

2018-01-10 Thread Varun Thacker (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-11832?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321233#comment-16321233
 ] 

Varun Thacker commented on SOLR-11832:
--

Hi Tim,

Is SOLR-11586 what you're looking for?

> Restore from backup creates old format collections
> --
>
> Key: SOLR-11832
> URL: https://issues.apache.org/jira/browse/SOLR-11832
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: Backup/Restore
>Affects Versions: 7.2, 6.6.2
>Reporter: Tim Owen
>Assignee: Varun Thacker
>Priority: Minor
> Attachments: SOLR-11832.patch
>
>
> Restoring a collection from a backup always creates the new collection using 
> the old format state json (format 1), as a global clusterstate.json file at 
> top level of ZK. All new collections should be defaulting to use the newer 
> per-collection (format 2) in /collections/.../state.json
> As we're running clusters with many collections, the old global state format 
> isn't good for us, so as a workaround for now we're calling 
> MIGRATESTATEFORMAT immediately after the RESTORE call.
> This bug was mentioned in the comments of SOLR-5750 and also recently 
> mentioned by [~varunthacker] in SOLR-11560
> Code patch attached, but as per [~dsmiley]'s comment in the code, fixing this 
> means at least 1 test class doesn't succeed anymore. From what I can tell, 
> the BasicDistributedZk2Test fails because it's not using the official 
> collection API to create a collection, it seems to be bypassing that and 
> manually creating cores using the core admin api instead, which I think is 
> not enough to ensure the correct ZK nodes are created. The test superclass 
> has some methods to create a collection which do use the collection api so I 
> could try fixing the tests (I'm just not that familiar with why those 
> BasicDistributed*Test classes aren't using the collection api).



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Closed] (SOLR-4846) reverseWildCardFilterFactory returns reversed node

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4846?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett closed SOLR-4846.
---

> reverseWildCardFilterFactory returns reversed node
> --
>
> Key: SOLR-4846
> URL: https://issues.apache.org/jira/browse/SOLR-4846
> Project: Solr
>  Issue Type: Bug
>  Components: query parsers
>Affects Versions: 3.6
> Environment: red hat linux v5.8
>Reporter: Ken Fergus
>Priority: Minor
>
> Searching docs using double wildcards i.e. *fre* returns normal results: 
> "Freedom", as well as reversed nodes: "Interfax". Using NGramFilterFactory is 
> not an option because the field being searched can be extremely large and 
> this would create a tremendous amount of tokens.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Resolved] (SOLR-4846) reverseWildCardFilterFactory returns reversed node

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4846?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett resolved SOLR-4846.
-
Resolution: Cannot Reproduce

There is likely some configuration missing from the description, but I can't 
figure out what it might be - as stated, I can't reproduce this using Solr 7.2, 
so resolving. Please reopen if more information can be provided.

> reverseWildCardFilterFactory returns reversed node
> --
>
> Key: SOLR-4846
> URL: https://issues.apache.org/jira/browse/SOLR-4846
> Project: Solr
>  Issue Type: Bug
>  Components: query parsers
>Affects Versions: 3.6
> Environment: red hat linux v5.8
>Reporter: Ken Fergus
>Priority: Minor
>
> Searching docs using double wildcards i.e. *fre* returns normal results: 
> "Freedom", as well as reversed nodes: "Interfax". Using NGramFilterFactory is 
> not an option because the field being searched can be extremely large and 
> this would create a tremendous amount of tokens.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (SOLR-11601) geodist fails for some fields when field is in parenthesis instead of sfield param

2018-01-10 Thread David Smiley (JIRA)

[ 
https://issues.apache.org/jira/browse/SOLR-11601?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16321200#comment-16321200
 ] 

David Smiley commented on SOLR-11601:
-

Really either.  I'd be happy to review but I don't have time.  

If you improve the error message the user sees, then this would probably 
involve the addition of a new try-catch where GeoDistValueSourceParser calls 
fp.parseValueSourceList to detect this situation and then advice specifically 
on what to do.

> geodist fails for some fields when field is in parenthesis instead of sfield 
> param
> --
>
> Key: SOLR-11601
> URL: https://issues.apache.org/jira/browse/SOLR-11601
> Project: Solr
>  Issue Type: Improvement
>  Security Level: Public(Default Security Level. Issues are Public) 
>  Components: spatial
>Affects Versions: 6.6
>Reporter: Clemens Wyss
>Priority: Minor
>
> I'm switching my schemas from deprecated solr.LatLonType to 
> solr.LatLonPointSpatialField.
> Now my sortquery (which used to work with solr.LatLonType):
> *sort=geodist(b4_location__geo_si,47.36667,8.55) asc*
> raises the error
> {color:red}*"sort param could not be parsed as a query, and is not a field 
> that exists in the index: geodist(b4_location__geo_si,47.36667,8.55)"*{color}
> Invoking sort using syntax 
> {color:#14892c}sfield=b4_location__geo_si=47.36667,8.55=geodist() asc
> works as expected though...{color}



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-4846) reverseWildCardFilterFactory returns reversed node

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4846?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4846:

Priority: Minor  (was: Critical)

> reverseWildCardFilterFactory returns reversed node
> --
>
> Key: SOLR-4846
> URL: https://issues.apache.org/jira/browse/SOLR-4846
> Project: Solr
>  Issue Type: Bug
>  Components: query parsers
>Affects Versions: 3.6
> Environment: red hat linux v5.8
>Reporter: Ken Fergus
>Priority: Minor
>
> Searching docs using double wildcards i.e. *fre* returns normal results: 
> "Freedom", as well as reversed nodes: "Interfax". Using NGramFilterFactory is 
> not an option because the field being searched can be extremely large and 
> this would create a tremendous amount of tokens.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-4841) DetectedLanguage constructor should be public

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4841?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4841:

Component/s: contrib - LangId

> DetectedLanguage constructor should be public
> -
>
> Key: SOLR-4841
> URL: https://issues.apache.org/jira/browse/SOLR-4841
> Project: Solr
>  Issue Type: Bug
>  Components: contrib - LangId
>Reporter: Maciej Lizewski
>
> org.apache.solr.update.processor.DetectedLanguage constructor should be 
> public. Without that it is impossible to create your own class extending 
> LanguageIdentifierUpdateProcessor.
> The LanguageIdentifierUpdateProcessor base class needs the detectLanguage(String 
> content) function to return a list of DetectedLanguage's, but you cannot create 
> such objects because the constructor is accessible only in the same package 
> (org.apache.solr.update.processor).



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS] Lucene-Solr-master-Solaris (64bit/jdk1.8.0) - Build # 1618 - Still Unstable!

2018-01-10 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Solaris/1618/
Java: 64bit/jdk1.8.0 -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC

2 tests failed.
FAILED:  
org.apache.solr.cloud.autoscaling.sim.TestDistribStateManager.testGetSetRemoveData

Error Message:
Node watch should have fired!

Stack Trace:
java.lang.AssertionError: Node watch should have fired!
at 
__randomizedtesting.SeedInfo.seed([851D5DC2D290708A:A38DB290861EB680]:0)
at org.junit.Assert.fail(Assert.java:93)
at 
org.apache.solr.cloud.autoscaling.sim.TestDistribStateManager.testGetSetRemoveData(TestDistribStateManager.java:256)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.lang.Thread.run(Thread.java:748)


FAILED:  
junit.framework.TestSuite.org.apache.solr.security.hadoop.TestImpersonationWithHadoopAuth

Error Message:
2 threads leaked from SUITE scope at 
org.apache.solr.security.hadoop.TestImpersonationWithHadoopAuth: 1) 
Thread[id=15864, name=jetty-launcher-3520-thread-2-EventThread, 

[jira] [Updated] (SOLR-4824) Fuzzy / Faceting results are changed after ingestion of documents past a certain number

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4824?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4824:

Component/s: search

> Fuzzy / Faceting results are changed after ingestion of documents past a 
> certain number 
> 
>
> Key: SOLR-4824
> URL: https://issues.apache.org/jira/browse/SOLR-4824
> Project: Solr
>  Issue Type: Bug
>  Components: search
>Affects Versions: 4.2, 4.3
> Environment: Ubuntu 12.04 LTS 12.04.2 
> jre1.7.0_17
> jboss-as-7.1.1.Final
>Reporter: Lakshmi Venkataswamy
>
> In upgrading from SOLR 3.6 to 4.2/4.3 and comparing results on fuzzy queries, 
> I found that after a certain number of documents were ingested the fuzzy 
> query had drastically lower number of results.  We have approximately 18,000 
> documents per day and after ingesting approximately 40 days of documents, the 
> next incremental day of documents results in a lower number of results of a 
> fuzzy search.
> The query :  
> http://10.100.1.xx:8080/solr/corex/select?q=cc:worde~1=on=date=date
> produces the following result before the threshold is crossed
> 
> 02349 name="facet">ondate
> cc:worde~1 name="facet.field">date numFound="362803" start="0">
>  name="facet_fields">
> 2866
> 11372
> 11514
> 12015
> 11746
> 10853
> 11053
> 11815
> 11427
> 11475
> 11461
> 12058
> 11335
> 12039
> 12064
> 12234
> 12545
> 11766
> 12197
> 11414
> 11633
> 12863
> 12378
> 11947
> 11822
> 11882
> 10474
> 11051
> 11776
> 11957
> 11260
> 8511
>  name="facet_ranges"/>
> Once the 40 days of documents ingested threshold is crossed the results drop 
> as show below for the same query
> 
> 02 name="facet">ondate name="q">cc:worde~1date
> 
>  name="facet_fields">
> 0
> 41
> 21
> 24
> 19
> 9
> 11
> 17
> 14
> 24
> 43
> 14
> 52
> 57
> 25
> 17
> 34
> 11
> 16
> 121
> 33
> 26
> 59
> 27
> 10
> 9
> 6
> 16
> 11
> 15
> 21
> 109
> 11
> 7
> 10
> 8
> 13
> 75
> 77
> 31
> 35
> 22
> 18
> 11
> 68
> 40
>  name="facet_ranges"/>
> I have also tested this with different months of data and have seen the same 
> issue  around the number of documents.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-4804) remove support for "defaultCore" and "DEFAULT_DEFAULT_CORE_NAME" in solr 5.x

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4804?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4804:

Fix Version/s: (was: 6.0)
   Issue Type: Improvement  (was: Bug)

> remove support for "defaultCore" and "DEFAULT_DEFAULT_CORE_NAME" in solr 5.x
> 
>
> Key: SOLR-4804
> URL: https://issues.apache.org/jira/browse/SOLR-4804
> Project: Solr
>  Issue Type: Improvement
>  Components: multicore
>Reporter: Hoss Man
>Assignee: Mark Miller
>
> In thread "[JENKINS] Lucene-Solr-SmokeRelease-4.x - Build # 69 - Failure" 
> miller suggested that we should phase out the concept of a default core name 
> by 5.x -- removing this is probably easy, but we need to change a lot of 
> little things to do this cleanly.
> Off the top of my head...
> * smoke checker assumes simple URLs
> * post.jar - needs to make URL mandatory
> * lots of doc examples need to be updated



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-4804) remove support for "defaultCore" and "DEFAULT_DEFAULT_CORE_NAME" in solr 5.x

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4804?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4804:

Component/s: multicore

> remove support for "defaultCore" and "DEFAULT_DEFAULT_CORE_NAME" in solr 5.x
> 
>
> Key: SOLR-4804
> URL: https://issues.apache.org/jira/browse/SOLR-4804
> Project: Solr
>  Issue Type: Improvement
>  Components: multicore
>Reporter: Hoss Man
>Assignee: Mark Miller
>
> In thread "[JENKINS] Lucene-Solr-SmokeRelease-4.x - Build # 69 - Failure" 
> miller suggested that we should phase out the concept of a default core name 
> by 5.x -- removing this is probably easy, but we need to change a lot of 
> little things to do this cleanly.
> Off the top of my head...
> * smoke checker assumes simple URLs
> * post.jar - needs to make URL mandatory
> * lots of doc examples need to be updated



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-4802) Atomic Updated for large documents is very slow and at some point Server stops responding.

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4802?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4802:

Component/s: update

> Atomic Updated for large documents is very slow and at some point Server 
> stops responding. 
> ---
>
> Key: SOLR-4802
> URL: https://issues.apache.org/jira/browse/SOLR-4802
> Project: Solr
>  Issue Type: Bug
>  Components: update
>Affects Versions: 4.2.1
> Environment: Jboss 5.1 with Solr 4.2.1 and JDk 1.6.0_33u
>Reporter: Aditya
>Priority: Critical
>
> I am updating three fields in the document which are of type long and float. 
> This is an atomic update. Updating around 30K documents and i always get 
> stuck after 80%.
> Update 200 docs per request in a thread and i execute 5 such threads in 
> parallel. 
> The current work around is that i have auto-commit setup for 1 docs with 
> openSearcher = false.
> i guess that this is related to SOLR-4589
> Some additional information 
> the machine is 6core with 5GB Heap. ramBuffer=1024MB



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-4754) solrcloud does not detect an implicit "host" and does not provide clear error using 4x example

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4754?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4754:

Component/s: SolrCloud

> solrcloud does not detect an implicit "host" and does not provide clear error 
> using 4x example
> --
>
> Key: SOLR-4754
> URL: https://issues.apache.org/jira/browse/SOLR-4754
> Project: Solr
>  Issue Type: Bug
>  Components: SolrCloud
>Reporter: Hoss Man
>Assignee: Mark Miller
> Fix For: 4.9, 6.0
>
>
> Testing out the 4.3.0 RC3, I tried to run through the SolrCloud examples.
> Following the steps for "Example A: Simple two shard cluster" my two nodes 
> started up w/o any obvious problem, however the I noticed the cluster graph 
> was empty, and attempts to index documents failed with invalid url errors 
> when trying to forward the distributed updates.  Closer inspection of the 
> cluster state lead me to discover that the URLs for the nodes as registered 
> with ZK did not include any host information at all.  (details to follow in 
> comment)
> Apparently, the logic for implicitly detecting a hostname to use with 
> SolrCloud failed to work, and did not cause any sort of startup error.
> Important things to note:
> # java clearly _did_ know what the current configured hostname was for my 
> machine, because it appeared correctly in the {{}} tag of the admin 
> UI (pulled from "/admin/system") so i don't think this problem is specific 
> to any sort of glitch in my hostname configuration.
> # explicitly setting the "host" sys prop (as used in the example solr.xml) 
> worked around the problem
> # I could _not_ reproduce this problem with Solr 4.2.1 (using the 4.2.1 
> example configs)
> We should try to make the hostname/url detection logic smarter (i'm not sure 
> why it isn't working as well as the SystemInfoHandler) and it should fail 
> loudly on startup as last resort rather than registering the node with ZK 
> using an invalid URL.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-4769) Highlight Current Node in SolrCloud graphical display

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4769?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4769:

Issue Type: Improvement  (was: Bug)

> Highlight Current Node in SolrCloud graphical display
> -
>
> Key: SOLR-4769
> URL: https://issues.apache.org/jira/browse/SOLR-4769
> Project: Solr
>  Issue Type: Improvement
>  Components: Admin UI, SolrCloud
>Affects Versions: 4.2
>Reporter: Mark Bennett
>
> When running in Cloud mode, we show a graphical picture of the current 
> cluster in the URL http://localhost:8983/solr/#/~cloud
> It'd be nice to visually indicate which of these nodes I'm currently on
> In SOLR-4438 Stefan Matheis (steffkes) mentioned he thought this was a nice 
> idea, but should be logged as a separate bug in JIRA.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-4770) Mouse-over info for other nodes in SolrCloud rendering

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4770?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4770:

Issue Type: Improvement  (was: Bug)

> Mouse-over info for other nodes in SolrCloud rendering
> --
>
> Key: SOLR-4770
> URL: https://issues.apache.org/jira/browse/SOLR-4770
> Project: Solr
>  Issue Type: Improvement
>Affects Versions: 4.2
>Reporter: Mark Bennett
>
> We render a nice graphical view of your current cluster from the URL 
> http://localhost:8983/solr/#/~cloud
> It'd be nice to gather some info about those other nodes without having to 
> visit them.  Since you can't right-click to open those nodes in another tab, 
> your only choice now is to duplicate that tab, and then click the link.
> This was mentioned in SOLR-4438 but Stefan Matheis (steffkes) requested that 
> it be broken out into a separate task and detailed a bit more.
> Mouse-Over often indicates a Balloon-help style thing, but another option is 
> to reserve another part of the screen down below that can display node info, 
> and then refresh it as the mouse hovers over various nodes.  I'm not sure 
> which is easier nor which is more accessible.
> Sadly some devices don't support Hover, and the click method is already used 
> to take you to that other node.  So perhaps it'd be easier to display some 
> type of "(i)" / Info icon that you can specifically click on to see that 
> node's info, without jumping to it.
> Specific items it might be nice to see:
> (generally mirroring what's on default Dashboard screen if you were to visit 
> that node)
> * Start
> * Host
> * CWD
> * Instance
> * Data
> * Index
> * Solr & Lucene version (to help with upgrading, though not sure we need all 
> 4 fields?)
> Resources:
> * JVM memory
> * maybe Physical Memory
> * Some hint about disk stats?
> Args of:
> -DnumShards
> -DzkRun
> -STOP
> ... not sure how many could get too much, but some would be handy



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-4753) SEVERE: Too many close [count:-1] for SolrCore in logs (4.2.1)

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4753?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4753:

Component/s: SolrCloud

> SEVERE: Too many close [count:-1] for SolrCore in logs (4.2.1)
> --
>
> Key: SOLR-4753
> URL: https://issues.apache.org/jira/browse/SOLR-4753
> Project: Solr
>  Issue Type: Bug
>  Components: SolrCloud
>Reporter: Mark Miller
>Assignee: Mark Miller
> Fix For: 4.9, 6.0
>
>
> a user reported core reference counting issues in 4.2.1...
> http://markmail.org/message/akrrj5o24prasm6e



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Closed] (SOLR-4740) we should protect aliases with volatile in ZkStateReader

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4740?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett closed SOLR-4740.
---

> we should protect aliases with volatile in ZkStateReader
> 
>
> Key: SOLR-4740
> URL: https://issues.apache.org/jira/browse/SOLR-4740
> Project: Solr
>  Issue Type: Bug
>Reporter: milesli
>
> old code 
>   private Aliases aliases = new Aliases();
> new code
>   private volatile Aliases aliases = new Aliases();



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Resolved] (SOLR-4740) we should protect aliases with volatile in ZkStateReader

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4740?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett resolved SOLR-4740.
-
Resolution: Won't Fix

It's really unclear what this is referring to, making it nearly impossible 
anyone will act on it. I checked the "old code" referenced above and it no 
longer exists (if it ever did) in Solr's code, so without a more detailed use 
case or example it's not clear what should happen today.

> we should protect aliases with volatile in ZkStateReader
> 
>
> Key: SOLR-4740
> URL: https://issues.apache.org/jira/browse/SOLR-4740
> Project: Solr
>  Issue Type: Bug
>Reporter: milesli
>
> old code 
>   private Aliases aliases = new Aliases();
> new code
>   private volatile Aliases aliases = new Aliases();



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS] Lucene-Solr-NightlyTests-7.2 - Build # 6 - Still Failing

2018-01-10 Thread Apache Jenkins Server
Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-7.2/6/

13 tests failed.
FAILED:  org.apache.lucene.index.TestBinaryDocValuesUpdates.testTonsOfUpdates

Error Message:
Java heap space

Stack Trace:
java.lang.OutOfMemoryError: Java heap space
at 
__randomizedtesting.SeedInfo.seed([7C2475C2A17CDE9E:401ABC9435CF17C]:0)
at org.apache.lucene.store.RAMFile.newBuffer(RAMFile.java:78)
at org.apache.lucene.store.RAMFile.addBuffer(RAMFile.java:51)
at 
org.apache.lucene.store.RAMOutputStream.switchCurrentBuffer(RAMOutputStream.java:164)
at 
org.apache.lucene.store.RAMOutputStream.writeBytes(RAMOutputStream.java:150)
at 
org.apache.lucene.store.MockIndexOutputWrapper.writeBytes(MockIndexOutputWrapper.java:141)
at 
org.apache.lucene.codecs.memory.MemoryDocValuesConsumer.addBinaryField(MemoryDocValuesConsumer.java:330)
at 
org.apache.lucene.codecs.memory.MemoryDocValuesConsumer.addBinaryField(MemoryDocValuesConsumer.java:303)
at 
org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat$FieldsWriter.addBinaryField(PerFieldDocValuesFormat.java:114)
at 
org.apache.lucene.index.ReadersAndUpdates.handleBinaryDVUpdates(ReadersAndUpdates.java:542)
at 
org.apache.lucene.index.ReadersAndUpdates.writeFieldUpdates(ReadersAndUpdates.java:719)
at 
org.apache.lucene.index.IndexWriter$ReaderPool.writeSomeDocValuesUpdates(IndexWriter.java:705)
at 
org.apache.lucene.index.FrozenBufferedUpdates.apply(FrozenBufferedUpdates.java:331)
at 
org.apache.lucene.index.DocumentsWriter$ResolveUpdatesEvent.process(DocumentsWriter.java:739)
at 
org.apache.lucene.index.IndexWriter.processEvents(IndexWriter.java:5106)
at 
org.apache.lucene.index.IndexWriter.processEvents(IndexWriter.java:5096)
at 
org.apache.lucene.index.IndexWriter.updateDocValues(IndexWriter.java:1891)
at 
org.apache.lucene.index.TestBinaryDocValuesUpdates.testTonsOfUpdates(TestBinaryDocValuesUpdates.java:1323)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)


FAILED:  
junit.framework.TestSuite.org.apache.lucene.index.TestBinaryDocValuesUpdates

Error Message:
Captured an uncaught exception in thread: Thread[id=5193, name=Lucene Merge 
Thread #1, state=RUNNABLE, group=TGRP-TestBinaryDocValuesUpdates]

Stack Trace:
com.carrotsearch.randomizedtesting.UncaughtExceptionError: Captured an uncaught 
exception in thread: Thread[id=5193, name=Lucene Merge Thread #1, 
state=RUNNABLE, group=TGRP-TestBinaryDocValuesUpdates]
at __randomizedtesting.SeedInfo.seed([7C2475C2A17CDE9E]:0)
Caused by: org.apache.lucene.index.MergePolicy$MergeException: 
org.apache.lucene.store.AlreadyClosedException: refusing to delete any files: 
this IndexWriter hit an unrecoverable exception
at __randomizedtesting.SeedInfo.seed([7C2475C2A17CDE9E]:0)
at 
org.apache.lucene.index.ConcurrentMergeScheduler.handleMergeException(ConcurrentMergeScheduler.java:703)
at 
org.apache.lucene.index.ConcurrentMergeScheduler$MergeThread.run(ConcurrentMergeScheduler.java:683)
Caused by: org.apache.lucene.store.AlreadyClosedException: refusing to delete 
any files: this IndexWriter hit an unrecoverable exception
at 
org.apache.lucene.index.IndexFileDeleter.ensureOpen(IndexFileDeleter.java:345)
at 
org.apache.lucene.index.IndexFileDeleter.deleteFiles(IndexFileDeleter.java:697)
at 
org.apache.lucene.index.IndexFileDeleter.deleteNewFiles(IndexFileDeleter.java:692)
at 

[jira] [Updated] (SOLR-4739) May be lost update when creating or deleting Alias

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4739?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4739:

Component/s: SolrCloud

> May be lost update when creating or deleting Alias 
> ---
>
> Key: SOLR-4739
> URL: https://issues.apache.org/jira/browse/SOLR-4739
> Project: Solr
>  Issue Type: Bug
>  Components: SolrCloud
>Reporter: milesli
>
> we may create or delete alias before zookeeper notify alias watch, 
> then clone the current aliases and update
> finally send data to zookeeper.
> so it may lead to loss of updates. 
> add a line code:  zkStateReader.setAliases(newAliases);
> ...
> try {
>   zkStateReader.getZkClient().setData(ZkStateReader.ALIASES,
>   jsonBytes, true);
>   
>  // update the current aliases
>   zkStateReader.setAliases(newAliases);
>   
>   checkForAlias(aliasName, collections);
>   // some fudge for other nodes
>   Thread.sleep(100);
> } catch (KeeperException e) {
>   log.error("", e);
>   throw new SolrException(ErrorCode.SERVER_ERROR, e);
> } catch (InterruptedException e) {
>   log.warn("", e);
>   throw new SolrException(ErrorCode.SERVER_ERROR, e);
> }
> ...



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Resolved] (SOLR-4728) Getting a null pointer exception when i dont use the updateLog parameter in solrConfig.xml

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4728?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett resolved SOLR-4728.
-
Resolution: Invalid

>From the log entries showing the errors, it appears ZK is in use, so it's 
>SolrCloud, and the updateLog is required for SolrCloud.

> Getting a null pointer exception when i dont use the updateLog parameter in 
> solrConfig.xml
> --
>
> Key: SOLR-4728
> URL: https://issues.apache.org/jira/browse/SOLR-4728
> Project: Solr
>  Issue Type: Bug
>  Components: Schema and Analysis
>Affects Versions: 4.2
>Reporter: Vicky Desai
>Priority: Minor
> Attachments: solrconfig.xml
>
>
> If i disable update log in solr 4.2 then i get the following exception
> SEVERE: :java.lang.NullPointerException
> at 
> org.apache.solr.cloud.ShardLeaderElectionContext.runLeaderProcess(ElectionContext.java:190)
> at 
> org.apache.solr.cloud.LeaderElector.runIamLeaderProcess(LeaderElector.java:156)
> at 
> org.apache.solr.cloud.LeaderElector.checkIfIamLeader(LeaderElector.java:100)
> at 
> org.apache.solr.cloud.LeaderElector.joinElection(LeaderElector.java:266)
> at 
> org.apache.solr.cloud.ZkController.joinElection(ZkController.java:935)
> at org.apache.solr.cloud.ZkController.register(ZkController.java:761)
> at org.apache.solr.cloud.ZkController.register(ZkController.java:727)
> at 
> org.apache.solr.core.CoreContainer.registerInZk(CoreContainer.java:908)
> at 
> org.apache.solr.core.CoreContainer.registerCore(CoreContainer.java:892)
> at org.apache.solr.core.CoreContainer.register(CoreContainer.java:841)
> at org.apache.solr.core.CoreContainer$3.call(CoreContainer.java:638)
> at org.apache.solr.core.CoreContainer$3.call(CoreContainer.java:629)
> at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303)
> at java.util.concurrent.FutureTask.run(FutureTask.java:138)
> at 
> java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:441)
> at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303)
> at java.util.concurrent.FutureTask.run(FutureTask.java:138)
> at 
> java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886)
> at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908)
> at java.lang.Thread.run(Thread.java:619)
> Apr 12, 2013 6:39:56 PM org.apache.solr.common.SolrException log
> SEVERE: null:org.apache.solr.common.cloud.ZooKeeperException:
> at 
> org.apache.solr.core.CoreContainer.registerInZk(CoreContainer.java:931)
> at 
> org.apache.solr.core.CoreContainer.registerCore(CoreContainer.java:892)
> at org.apache.solr.core.CoreContainer.register(CoreContainer.java:841)
> at org.apache.solr.core.CoreContainer$3.call(CoreContainer.java:638)
> at org.apache.solr.core.CoreContainer$3.call(CoreContainer.java:629)
> at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303)
> at java.util.concurrent.FutureTask.run(FutureTask.java:138)
> at 
> java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:441)
> at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303)
> at java.util.concurrent.FutureTask.run(FutureTask.java:138)
> at 
> java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886)
> at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908)
> at java.lang.Thread.run(Thread.java:619)
> Caused by: java.lang.NullPointerException
> at 
> org.apache.solr.cloud.ShardLeaderElectionContext.runLeaderProcess(ElectionContext.java:190)
> at 
> org.apache.solr.cloud.LeaderElector.runIamLeaderProcess(LeaderElector.java:156)
> at 
> org.apache.solr.cloud.LeaderElector.checkIfIamLeader(LeaderElector.java:100)
> at 
> org.apache.solr.cloud.LeaderElector.joinElection(LeaderElector.java:266)
> at 
> org.apache.solr.cloud.ZkController.joinElection(ZkController.java:935)
> at org.apache.solr.cloud.ZkController.register(ZkController.java:761)
> at org.apache.solr.cloud.ZkController.register(ZkController.java:727)
> at 
> org.apache.solr.core.CoreContainer.registerInZk(CoreContainer.java:908)
> ... 12 more
> and solr fails to start. However if i add updatelog in my solrconfig.xml it 
> starts.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Closed] (SOLR-4728) Getting a null pointer exception when i dont use the updateLog parameter in solrConfig.xml

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4728?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett closed SOLR-4728.
---

> Getting a null pointer exception when i dont use the updateLog parameter in 
> solrConfig.xml
> --
>
> Key: SOLR-4728
> URL: https://issues.apache.org/jira/browse/SOLR-4728
> Project: Solr
>  Issue Type: Bug
>  Components: Schema and Analysis
>Affects Versions: 4.2
>Reporter: Vicky Desai
>Priority: Minor
> Attachments: solrconfig.xml
>
>
> If i disable update log in solr 4.2 then i get the following exception
> SEVERE: :java.lang.NullPointerException
> at 
> org.apache.solr.cloud.ShardLeaderElectionContext.runLeaderProcess(ElectionContext.java:190)
> at 
> org.apache.solr.cloud.LeaderElector.runIamLeaderProcess(LeaderElector.java:156)
> at 
> org.apache.solr.cloud.LeaderElector.checkIfIamLeader(LeaderElector.java:100)
> at 
> org.apache.solr.cloud.LeaderElector.joinElection(LeaderElector.java:266)
> at 
> org.apache.solr.cloud.ZkController.joinElection(ZkController.java:935)
> at org.apache.solr.cloud.ZkController.register(ZkController.java:761)
> at org.apache.solr.cloud.ZkController.register(ZkController.java:727)
> at 
> org.apache.solr.core.CoreContainer.registerInZk(CoreContainer.java:908)
> at 
> org.apache.solr.core.CoreContainer.registerCore(CoreContainer.java:892)
> at org.apache.solr.core.CoreContainer.register(CoreContainer.java:841)
> at org.apache.solr.core.CoreContainer$3.call(CoreContainer.java:638)
> at org.apache.solr.core.CoreContainer$3.call(CoreContainer.java:629)
> at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303)
> at java.util.concurrent.FutureTask.run(FutureTask.java:138)
> at 
> java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:441)
> at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303)
> at java.util.concurrent.FutureTask.run(FutureTask.java:138)
> at 
> java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886)
> at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908)
> at java.lang.Thread.run(Thread.java:619)
> Apr 12, 2013 6:39:56 PM org.apache.solr.common.SolrException log
> SEVERE: null:org.apache.solr.common.cloud.ZooKeeperException:
> at 
> org.apache.solr.core.CoreContainer.registerInZk(CoreContainer.java:931)
> at 
> org.apache.solr.core.CoreContainer.registerCore(CoreContainer.java:892)
> at org.apache.solr.core.CoreContainer.register(CoreContainer.java:841)
> at org.apache.solr.core.CoreContainer$3.call(CoreContainer.java:638)
> at org.apache.solr.core.CoreContainer$3.call(CoreContainer.java:629)
> at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303)
> at java.util.concurrent.FutureTask.run(FutureTask.java:138)
> at 
> java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:441)
> at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303)
> at java.util.concurrent.FutureTask.run(FutureTask.java:138)
> at 
> java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886)
> at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908)
> at java.lang.Thread.run(Thread.java:619)
> Caused by: java.lang.NullPointerException
> at 
> org.apache.solr.cloud.ShardLeaderElectionContext.runLeaderProcess(ElectionContext.java:190)
> at 
> org.apache.solr.cloud.LeaderElector.runIamLeaderProcess(LeaderElector.java:156)
> at 
> org.apache.solr.cloud.LeaderElector.checkIfIamLeader(LeaderElector.java:100)
> at 
> org.apache.solr.cloud.LeaderElector.joinElection(LeaderElector.java:266)
> at 
> org.apache.solr.cloud.ZkController.joinElection(ZkController.java:935)
> at org.apache.solr.cloud.ZkController.register(ZkController.java:761)
> at org.apache.solr.cloud.ZkController.register(ZkController.java:727)
> at 
> org.apache.solr.core.CoreContainer.registerInZk(CoreContainer.java:908)
> ... 12 more
> and solr fails to start. However if i add updatelog in my solrconfig.xml it 
> starts.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-4697) add 'zkHost="${zkHost}"' to example solr.xml and deprecate hardcoded check for zkHost system property

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4697?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4697:

Component/s: SolrCloud
 Issue Type: Improvement  (was: Bug)

> add 'zkHost="${zkHost}"' to example solr.xml and deprecate hardcoded check 
> for zkHost system property
> -
>
> Key: SOLR-4697
> URL: https://issues.apache.org/jira/browse/SOLR-4697
> Project: Solr
>  Issue Type: Improvement
>  Components: SolrCloud
>Reporter: Hoss Man
>
> Similar to the work done in SOLR-4622 related to hostContext and hostPort, we 
> should do a better job of advertising the zkHost setting in solr.xml and 
> deprecate the explicit check for a zkHost property in SolrCore.initZooKeeper 
> -- so that it still works in 4.x with a warning, but in 5.0 zkHost must be 
> specified in solr.xml, even if it's just to refer to a sys property with the 
> same name



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-4692) JSON Field transformer for DIH

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4692?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4692:

   Priority: Minor  (was: Major)
Component/s: contrib - DataImportHandler
 Issue Type: Improvement  (was: Bug)

> JSON Field transformer for DIH
> --
>
> Key: SOLR-4692
> URL: https://issues.apache.org/jira/browse/SOLR-4692
> Project: Solr
>  Issue Type: Improvement
>  Components: contrib - DataImportHandler
>Reporter: Bill Bell
>Priority: Minor
> Attachments: JSONTransform.jar, JSONTransformer.java, xml.jar
>
>
> This works in conjunction with SOLR-4685.
> Takes an XML field from SQL / manually and adds it as a JSON field.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Resolved] (SOLR-4681) Add spellcheck to default /select handler

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4681?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett resolved SOLR-4681.
-
Resolution: Won't Fix

Resolving as Won't Fix since we've discussed ad nauseam what to do with default 
configs, etc., and we always return back to the same principle to leave it as 
empty and clean as possible, which is reflected in the comments here from 4+ 
years ago.

I could have left this open for the suggested enhancement that the UI grey out 
the spellcheck box if it's not configured, but that feels like an improvement 
rather than a bug and IMO should be another issue.

> Add spellcheck to default /select handler
> -
>
> Key: SOLR-4681
> URL: https://issues.apache.org/jira/browse/SOLR-4681
> Project: Solr
>  Issue Type: Bug
>Affects Versions: 4.2
>Reporter: Mark Bennett
> Attachments: SOLR-4681-with-default-select.patch
>
>
> In SOLR-4680 I put a patch to fix spellcheck for the /spell handler.
> This bug/patch does that and also adds spellcheck to the default /select 
> launch.  I'm putting it as a separate bug because I suspect some people may 
> have stronger feelings about adding a component to the default that everybody 
> uses.
> However, in Solr we DO expose the spellcheck box under /select, and it does 
> not work, so I really think spellcheck should be in the default.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Closed] (SOLR-4681) Add spellcheck to default /select handler

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4681?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett closed SOLR-4681.
---

> Add spellcheck to default /select handler
> -
>
> Key: SOLR-4681
> URL: https://issues.apache.org/jira/browse/SOLR-4681
> Project: Solr
>  Issue Type: Bug
>Affects Versions: 4.2
>Reporter: Mark Bennett
> Attachments: SOLR-4681-with-default-select.patch
>
>
> In SOLR-4680 I put a patch to fix spellcheck for the /spell handler.
> This bug/patch does that and also adds spellcheck to the default /select 
> launch.  I'm putting it as a separate bug because I suspect some people may 
> have stronger feelings about adding a component to the default that everybody 
> uses.
> However, in Solr we DO expose the spellcheck box under /select, and it does 
> not work, so I really think spellcheck should be in the default.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-4674) repeaters must be configured to replicate on startup in order to work properly when replicating config files

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4674?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4674:

Component/s: replication (java)

> repeaters must be configured to replicate on startup in order to work 
> properly when replicating config files
> 
>
> Key: SOLR-4674
> URL: https://issues.apache.org/jira/browse/SOLR-4674
> Project: Solr
>  Issue Type: Bug
>  Components: replication (java)
>Reporter: Mark Miller
>
> Filing on Mark's behalf based on a mailing list thread about replication 
> tests related to automatic core reloads...
> {quote}
> Let's say you have 3 nodes, a master, a repeater, and a slave.
> When you do updates and commit on the master, things will replicate to the 
> repeater. You now need to make the repeaters most replicatable commit the 
> latest commit, even though a normal trigger for this (startup, commit) has 
> not occurred. If you don't, the right stuff won't happen between the repeater 
> and the slave.
> In the non core reload case, we currently reach right in the 
> ReplicationHandler and update the last replicatable commit point on the 
> repeater as part of installing the new index. This is somewhat new, there 
> used to be a commit that would push the slave gen past the leader by one. 
> In the Core reload case, it's a little trickier. If you are replicating on 
> startup, you should be fine - the right most replicatable commit will be set 
> when the core reloads. But if you don't, and just have replicate on commit, 
> the repeater won't be ready to replicate the right commit point to the slave.
> I guess the best workaround for that at the moment is to be sure to have 
> replicate on startup set on your repeater.
> {quote}



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Closed] (SOLR-4673) semanticsMode consistency acts like classic-consistency-hybrid

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4673?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett closed SOLR-4673.
---

> semanticsMode consistency acts like classic-consistency-hybrid
> --
>
> Key: SOLR-4673
> URL: https://issues.apache.org/jira/browse/SOLR-4673
> Project: Solr
>  Issue Type: Bug
>  Components: update
>Affects Versions: 4.1
>Reporter: Sam Kass
>  Labels: concurrency, optimistic
>
> Code like the following, run with semanticsMode set to "consistency", should 
> fail on the second doc.  Instead, it's acting like 
> classic-consistency-hybrid, where it treats the second doc as if it should 
> always overwrite.  (Going by docs in 
> http://wiki.apache.org/solr/Per%20Steffensen/Update%20semantics as it's the 
> only place where this feature added 2 releases ago is as yet documented.)
> {quote}
> String id = UUID.randomUUID().toString();
> SolrInputDocument docA = new SolrInputDocument();
> docA.addField("id", id);
> docA.addField("_version_", -1);
> SolrInputDocument docB = new SolrInputDocument();
> docB.addField("id", id);
> // intentionally omit _version_
> solrServer.add(docA);
> solrServer.add(docB);
> {quote}



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Resolved] (SOLR-4673) semanticsMode consistency acts like classic-consistency-hybrid

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4673?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett resolved SOLR-4673.
-
Resolution: Invalid

AFAICT, this bug report is against a feature that hasn't been committed 
(SOLR-3178) - there's nothing in current Lucene/Solr code base for 
{{semanticsMode}}, the only place I found it mentioned beyond the wiki article 
(which also says it's not committed) is on SOLR-3178. Since the feature hasn't 
been committed, I'm not sure there's a reason to keep this issue forever. I 
linked the issues, though in case that feature gets picked back up again in the 
future.

> semanticsMode consistency acts like classic-consistency-hybrid
> --
>
> Key: SOLR-4673
> URL: https://issues.apache.org/jira/browse/SOLR-4673
> Project: Solr
>  Issue Type: Bug
>  Components: update
>Affects Versions: 4.1
>Reporter: Sam Kass
>  Labels: concurrency, optimistic
>
> Code like the following, run with semanticsMode set to "consistency", should 
> fail on the second doc.  Instead, it's acting like 
> classic-consistency-hybrid, where it treats the second doc as if it should 
> always overwrite.  (Going by docs in 
> http://wiki.apache.org/solr/Per%20Steffensen/Update%20semantics as it's the 
> only place where this feature added 2 releases ago is as yet documented.)
> {quote}
> String id = UUID.randomUUID().toString();
> SolrInputDocument docA = new SolrInputDocument();
> docA.addField("id", id);
> docA.addField("_version_", -1);
> SolrInputDocument docB = new SolrInputDocument();
> docB.addField("id", id);
> // intentionally omit _version_
> solrServer.add(docA);
> solrServer.add(docB);
> {quote}



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Assigned] (SOLR-11770) NPE in tvrh if no field is specified and document doesn't contain any fields with term vectors

2018-01-10 Thread Erick Erickson (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-11770?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Erick Erickson reassigned SOLR-11770:
-

Assignee: Erick Erickson

> NPE in tvrh if no field is specified and document doesn't contain any fields 
> with term vectors
> --
>
> Key: SOLR-11770
> URL: https://issues.apache.org/jira/browse/SOLR-11770
> Project: Solr
>  Issue Type: Bug
>  Security Level: Public(Default Security Level. Issues are Public) 
>Affects Versions: 6.6.2
>Reporter: Nikolay Martynov
>Assignee: Erick Erickson
>
> It looks like if {{tvrh}} request doesn't contain {{fl}} parameter and 
> document doesn't have any fields with term vectors then Solr returns NPE.
> Request: 
> {{tvrh?shards.qt=/tvrh=field%3Avalue=json=id%3A123=true}}.
> On our 'old' schema we had some fields with {{termVectors}} and even more 
> fields with position data. In our new schema we tried to remove unused data 
> so we dropped a lot of position data and some term vectors.
> Our documents are 'sparsely' populated - not all documents contain all fields.
> Above request was returning fine for our 'old' schema and returns 500 for our 
> 'new' schema - on exactly same Solr (6.6.2).
> Stack trace:
> {code}
> 2017-12-18 01:15:00.958 ERROR (qtp255041198-46697) [c:test s:shard3 
> r:core_node11 x:test_shard3_replica1] o.a.s.h.RequestHandlerBase 
> java.lang.NullPointerException
>at 
> org.apache.solr.handler.component.TermVectorComponent.process(TermVectorComponent.java:324)
>at 
> org.apache.solr.handler.component.SearchHandler.handleRequestBody(SearchHandler.java:296)
>at 
> org.apache.solr.handler.RequestHandlerBase.handleRequest(RequestHandlerBase.java:173)
>at org.apache.solr.core.SolrCore.execute(SolrCore.java:2482)
>at org.apache.solr.servlet.HttpSolrCall.execute(HttpSolrCall.java:723)
>at org.apache.solr.servlet.HttpSolrCall.call(HttpSolrCall.java:529)
>at 
> org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:361)
>at 
> org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:305)
>at 
> org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1691)
>at 
> org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:582)
>at 
> org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:143)
>at 
> org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:548)
>at 
> org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:226)
>at 
> org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1180)
>at 
> org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:512)
>at 
> org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:185)
>at 
> org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1112)
>at 
> org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:141)
>at 
> org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:213)
>at 
> org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:119)
>at 
> org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:134)
>at 
> org.eclipse.jetty.rewrite.handler.RewriteHandler.handle(RewriteHandler.java:335)
>at 
> org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:134)
>at org.eclipse.jetty.server.Server.handle(Server.java:534)
>at org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:320)
>at 
> org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:251)
>at 
> org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:273)
>at org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:95)
>at 
> org.eclipse.jetty.io.SelectChannelEndPoint$2.run(SelectChannelEndPoint.java:93)
>at 
> org.eclipse.jetty.util.thread.strategy.ExecuteProduceConsume.executeProduceConsume(ExecuteProduceConsume.java:303)
>at 
> org.eclipse.jetty.util.thread.strategy.ExecuteProduceConsume.produceConsume(ExecuteProduceConsume.java:148)
>at 
> org.eclipse.jetty.util.thread.strategy.ExecuteProduceConsume.run(ExecuteProduceConsume.java:136)
>at 
> org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:671)
>at 
> org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:589)
>at java.lang.Thread.run(Thread.java:748)
> {code}



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)


[jira] [Updated] (SOLR-4669) conf file replication can cause new index to be loaded before new core (with new configs) is loaded.

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4669?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4669:

Component/s: replication (java)

> conf file replication can cause new index to be loaded before new core (with 
> new configs) is loaded.
> 
>
> Key: SOLR-4669
> URL: https://issues.apache.org/jira/browse/SOLR-4669
> Project: Solr
>  Issue Type: Bug
>  Components: replication (java)
>Reporter: Hoss Man
>
> Unless I'm smoking crack, some behavior i noticed working on SOLR-4629 
> indicates that when solr replication detects both a changed index, and 
> changed config files, the index is copied over and put into use by the 
> current solr core, then the config files are copied over, and then the solr 
> core is reloaded with the modified configs.
> which means there is a window of time in which the "new" index is being 
> searched using the old configs -- which could have bizarre consequences.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-4647) Grouping is broken on docvalues-only fields

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4647?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4647:

Component/s: search

> Grouping is broken on docvalues-only fields
> ---
>
> Key: SOLR-4647
> URL: https://issues.apache.org/jira/browse/SOLR-4647
> Project: Solr
>  Issue Type: Bug
>  Components: search
>Affects Versions: 4.2
>Reporter: Adrien Grand
>  Labels: newdev
> Attachments: SOLR-4647.patch
>
>
> There are a few places where grouping uses 
> FieldType.toObject(SchemaField.createField(String, float)) to translate a 
> String field value to an Object. The problem is that createField returns null 
> when the field is neither stored nor indexed, even if it has doc values.
> An option to fix it could be to use the ValueSource instead to resolve the 
> Object value (similarly to NumericFacets).



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-4644) SyncSliceTest often fails trying to setup an inconsistent state, generally only on Apache Jenkins.

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4644?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4644:

Component/s: Tests

> SyncSliceTest often fails trying to setup an inconsistent state, generally 
> only on Apache Jenkins.
> --
>
> Key: SOLR-4644
> URL: https://issues.apache.org/jira/browse/SOLR-4644
> Project: Solr
>  Issue Type: Bug
>  Components: Tests
>Reporter: Mark Miller
>Assignee: Mark Miller
> Fix For: 4.9, 6.0
>
>
> java.lang.AssertionError: Test Setup Failure: shard1 should have just been 
> set up to be inconsistent - but it's still consistent. 
> Leader:http://127.0.0.1:58076/gj_mz/in/collection1 Dead 
> Guy:http://127.0.0.1:64555/gj_mz/in/collection1skip list:[CloudJettyRunner 
> [url=http://127.0.0.1:18606/gj_mz/in/collection1], CloudJettyRunner 
> [url=http://127.0.0.1:10847/gj_mz/in/collection1]]



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Updated] (SOLR-4600) 400 Bad Request status should be returned if a query parameter has the wrong datatype

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4600?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4600:

Component/s: search

> 400 Bad Request status should be returned if a query parameter has the wrong 
> datatype
> -
>
> Key: SOLR-4600
> URL: https://issues.apache.org/jira/browse/SOLR-4600
> Project: Solr
>  Issue Type: Bug
>  Components: search
>Affects Versions: 4.2
>Reporter: Jack Krupansky
>
> Solr returns a 500 Server Error for the following query even though the error 
> is really a user error - wrong datatype for the rows parameter:
> {code}
> curl "http://localhost:8983/solr/select/?q=*:*&rows=all" -v
> {code}
> The rows parameter of course expects an integer.
> Somebody should probably trap the raw number format exception and turn it 
> into a 400 Bad Request SolrException.
> The actual response:
> {code}
> Jack Krupansky@JackKrupansky ~ $ curl 
> "http://localhost:8983/solr/select/?q=*:*&rows=all" -v
> * About to connect() to localhost port 8983 (#0)
> *   Trying 127.0.0.1...
> * connected
> * Connected to localhost (127.0.0.1) port 8983 (#0)
> > GET /solr/select/?q=*:*&rows=all HTTP/1.1
> > User-Agent: curl/7.27.0
> > Host: localhost:8983
> > Accept: */*
> >
> * additional stuff not fine 
> /usr/src/ports/curl/curl-7.27.0-1/src/curl-7.27.0/lib/transfer.c:1037: 0 0
> * HTTP 1.1 or later with persistent connection, pipelining supported
> < HTTP/1.1 500 Server Error
> < Cache-Control: no-cache, no-store
> < Pragma: no-cache
> < Expires: Sat, 01 Jan 2000 01:00:00 GMT
> < Last-Modified: Sun, 17 Mar 2013 21:23:39 GMT
> < ETag: "13d7a3c83fb"
> < Content-Type: application/xml; charset=UTF-8
> < Transfer-Encoding: chunked
> <
> 
> 
> 500 name="QTime">1*:* name="rows">allFor input 
> string: "all"java.lang.NumberFormatException: For 
> input string: "all"
> at java.lang.NumberFormatException.forInputString(Unknown Source)
> at java.lang.Integer.parseInt(Unknown Source)
> at java.lang.Integer.parseInt(Unknown Source)
> at org.apache.solr.search.QParser.getSort(QParser.java:277)
> at 
> org.apache.solr.handler.component.QueryComponent.prepare(QueryComponent.java:123)
> at 
> org.apache.solr.handler.component.SearchHandler.handleRequestBody(SearchHandler.java:187)
> at 
> org.apache.solr.handler.RequestHandlerBase.handleRequest(RequestHandlerBase.java:135)
> at org.apache.solr.core.SolrCore.execute(SolrCore.java:1797)
> at 
> org.apache.solr.servlet.SolrDispatchFilter.execute(SolrDispatchFilter.java:637)
> at 
> org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:343)
> at 
> org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:141)
> at 
> org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1307)
> at 
> org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:453)
> at 
> org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:137)
> at 
> org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:560)
> at 
> org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:231)
> at 
> org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1072)
> at 
> org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:382)
> at 
> org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:193)
> at 
> org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1006)
> at 
> org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:135)
> at 
> org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:255)
> at 
> org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:154)
> at 
> org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:116)
> at org.eclipse.jetty.server.Server.handle(Server.java:365)
> at 
> org.eclipse.jetty.server.AbstractHttpConnection.handleRequest(AbstractHttpConnection.java:485)
> at 
> org.eclipse.jetty.server.BlockingHttpConnection.handleRequest(BlockingHttpConnection.java:53)
> at 
> org.eclipse.jetty.server.AbstractHttpConnection.headerComplete(AbstractHttpConnection.java:926)
> at 
> org.eclipse.jetty.server.AbstractHttpConnection$RequestHandler.headerComplete(AbstractHttpConnection.java:988)
> at org.eclipse.jetty.http.HttpParser.parseNext(HttpParser.java:635)
> at 
> org.eclipse.jetty.http.HttpParser.parseAvailable(HttpParser.java:235)
> at 
> 

[jira] [Updated] (SOLR-4588) Partial Update of Poly Field Corrupts Data

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4588?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett updated SOLR-4588:

Component/s: update
 Schema and Analysis

> Partial Update of Poly Field Corrupts Data
> --
>
> Key: SOLR-4588
> URL: https://issues.apache.org/jira/browse/SOLR-4588
> Project: Solr
>  Issue Type: Bug
>  Components: Schema and Analysis, update
>Affects Versions: 4.0, 4.2
>Reporter: John Crygier
>Priority: Minor
> Attachments: schema.xml
>
>
> When updating a field that is a poly type (Testing with LatLonType), when you 
> do a partial document update, the poly fields will become multi-valued.  This 
> occurs even when the field is configured to not be multi-valued.
> Test Case
> Use the attached schema (schema.xml)
> And issue the following commands (With responses):
> curl 'localhost:8983/solr/update?commit=true' -H 
> 'Content-type:application/json' -d '[{"id":"JohnTestDocument", 
> "JohnTestLatLon" : "0,0"}]'
> RESPONSE: {"responseHeader":{"status":0,"QTime":2133}}
> curl 'http://localhost:8983/solr/select?q=*%3A*=json=true'
> RESPONSE: {
>   "responseHeader":{
> "status":0,
> "QTime":2,
> "params":{
>   "indent":"true",
>   "q":"*:*",
>   "wt":"json"}},
>   "response":{"numFound":1,"start":0,"docs":[
>   {
> "id":"JohnTestDocument",
> "JohnTestLatLon_0_coordinate":0.0,
> "JohnTestLatLon_1_coordinate":0.0,
> "JohnTestLatLon":"0,0",
> "_version_":-1596981248}]
>   }}
>   
> curl 'localhost:8983/solr/update?commit=true' -H 
> 'Content-type:application/json' -d 
> '[{"id":"JohnTestDocument","JohnTestLatLon":{"set":"5,7"}}]'
> RESPONSE: {"responseHeader":{"status":0,"QTime":218}}
> curl 'http://localhost:8983/solr/select?q=*%3A*=json=true'
> RESPONSE: {
>   "responseHeader":{
> "status":0,
> "QTime":2,
> "params":{
>   "indent":"true",
>   "q":"*:*",
>   "wt":"json"}},
>   "response":{"numFound":1,"start":0,"docs":[
>   {
> "id":"JohnTestDocument",
> "JohnTestLatLon_0_coordinate":[0.0,
>   5.0],
> "JohnTestLatLon_1_coordinate":[0.0,
>   7.0],
> "JohnTestLatLon":"5,7",
> "_version_":-118489088}]
>   }}
> As you can see, the 0.0 hangs around in JohnTestLatLon_0_coordinate and 
> JohnTestLatLon_1_coordinate.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Resolved] (SOLR-4579) A replicas lost 10 sequential documents, and we can't search these documents from this replicas forever

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4579?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett resolved SOLR-4579.
-
Resolution: Cannot Reproduce

There isn't enough information here to try to reproduce this today. It's 
entirely possible other changes have been made that render this fixed, but 
there's no way to know for sure.

> A replicas lost 10 sequential documents, and we can't search these documents 
> from this replicas forever
> ---
>
> Key: SOLR-4579
> URL: https://issues.apache.org/jira/browse/SOLR-4579
> Project: Solr
>  Issue Type: Bug
>  Components: replication (java), SolrCloud, update
>Affects Versions: 4.0
> Environment: suse11
>Reporter: albert newbgirl
>
> We see this many times, and it lost 10 sequential documents every time 



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Closed] (SOLR-4579) A replicas lost 10 sequential documents, and we can't search these documents from this replicas forever

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4579?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett closed SOLR-4579.
---

> A replicas lost 10 sequential documents, and we can't search these documents 
> from this replicas forever
> ---
>
> Key: SOLR-4579
> URL: https://issues.apache.org/jira/browse/SOLR-4579
> Project: Solr
>  Issue Type: Bug
>  Components: replication (java), SolrCloud, update
>Affects Versions: 4.0
> Environment: suse11
>Reporter: albert newbgirl
>
> We see this many times, and it lost 10 sequential documents every time 



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Closed] (SOLR-4560) Document updates do not honor deleted fields

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4560?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett closed SOLR-4560.
---

> Document updates do not honor deleted fields
> 
>
> Key: SOLR-4560
> URL: https://issues.apache.org/jira/browse/SOLR-4560
> Project: Solr
>  Issue Type: Bug
>  Components: update
>Affects Versions: 4.1
>Reporter: Neelesh Shastry
>
>  Steps to reproduce
> * Lets assume we have 3 fields
>  id, name,city
> * Create some documents 
> * Remove the field "city"
> * Fire an update on name
> curl http://localhost:8983/solr/update -H 'Content-type:application/json' -d '
> [
>  {
>   "id": "6",
>   "name" : {"set":"New Name"}
>  }]
>  '
>  
>  You can see
>  
>  {"responseHeader":{"status":400,"QTime":83},"error":{"msg":"ERROR: [doc=6] 
> unknown field 'city'","code":400}}



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Resolved] (SOLR-4560) Document updates do not honor deleted fields

2018-01-10 Thread Cassandra Targett (JIRA)

 [ 
https://issues.apache.org/jira/browse/SOLR-4560?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cassandra Targett resolved SOLR-4560.
-
Resolution: Invalid

Marking this as Invalid since the behavior is not a bug: deleting a field from 
the schema doesn't delete it from the index, a reindex would be required if 
schema field definitions are changed.

> Document updates do not honor deleted fields
> 
>
> Key: SOLR-4560
> URL: https://issues.apache.org/jira/browse/SOLR-4560
> Project: Solr
>  Issue Type: Bug
>  Components: update
>Affects Versions: 4.1
>Reporter: Neelesh Shastry
>
>  Steps to reproduce
> * Lets assume we have 3 fields
>  id, name,city
> * Create some documents 
> * Remove the field "city"
> * Fire an update on name
> curl http://localhost:8983/solr/update -H 'Content-type:application/json' -d '
> [
>  {
>   "id": "6",
>   "name" : {"set":"New Name"}
>  }]
>  '
>  
>  You can see
>  
>  {"responseHeader":{"status":400,"QTime":83},"error":{"msg":"ERROR: [doc=6] 
> unknown field 'city'","code":400}}



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (LUCENE-8126) Spatial prefix tree based on S2 geometry

2018-01-10 Thread David Smiley (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-8126?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16320976#comment-16320976
 ] 

David Smiley commented on LUCENE-8126:
--

_(an aside: I wish the JIRA GitHub integration didn't put so much code context 
around the feedback text!)_

It's nice to see a new RPT SpatialPrefixTree implementation :-)  The API is a 
little crusty; perhaps sometime we could kick around some ideas to make it 
nicer.

It'll be interesting to see how well this performs.  This appears to be a 6-ary 
tree, as opposed to 4-ary (quad) or 32-ary (geohash).  One could build a 
variable arity prefixTree by the way (i.e. first level has 256, next 128, 
etc.), and I recently tweaked one of ours to do that (not contributed back yet).

For point data, the higher the arity, the smaller the index but slower search 
as it must scan more.

For non-point data, it's not clear since distErrPct caps the resolution of a 
shape relative to its size, and I believe (though not 100% sure) that it yields 
a roughly normal distribution around a certain number of cells (given fixed 
distErrPct, random shape type & size, near equator, random tree arity).  It'd 
be neat to empirically validate my theory.  If I'm right, then the optimal 
arity is probably 4 for non-point shapes, and we have two of those 
implementations. RE "near equator" above, see LUCENE-5056 though it has an easy 
fix in my last comment.

Given the way S2 divides a world into 6 sides recursively, it seems it would 
place shapes at a balanced depth in the tree no matter where in the world the 
data is.  That's a nice benefit... making the cell depth for a shape a bit more 
shallow than the probable depth in the other tree implementations (assuming a 
target precision for a given shape).  That's a bonus.

CC [~nknize] you may find this issue interesting

> Spatial prefix tree based on S2 geometry
> 
>
> Key: LUCENE-8126
> URL: https://issues.apache.org/jira/browse/LUCENE-8126
> Project: Lucene - Core
>  Issue Type: New Feature
>  Components: modules/spatial-extras
>Reporter: Ignacio Vera
>
> Hi [~dsmiley],
> I have been working on a prefix tree based on Google S2 geometry 
> (https://s2geometry.io/) to be used mainly with Geo3d shapes with very 
> promising results, in particular for complex shapes (e.g polygons). Using 
> this pixelization scheme reduces the size of the index, improves the 
> performance of the queries and reduces the loading time for non-point shapes. 
> If you are ok with this contribution and before providing any code I would 
> like to understand what is the correct/prefered approach:
> 1) Add new dependency to the S2 library 
> (https://mvnrepository.com/artifact/io.sgr/s2-geometry-library-java). It has 
> Apache 2.0 license so it should be ok.
> 2) Create a utility class with all methods necessary to navigate the S2 tree 
> and create shapes from S2 cells (basically port what we need from the library 
> into Lucene).
> What do you think?



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[JENKINS-EA] Lucene-Solr-master-Linux (64bit/jdk-10-ea+37) - Build # 21254 - Still Unstable!

2018-01-10 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/21254/
Java: 64bit/jdk-10-ea+37 -XX:+UseCompressedOops -XX:+UseSerialGC

1 tests failed.
FAILED:  
org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessorTest.testParse

Error Message:
Error from server at https://127.0.0.1:46159/solr: Collection : 
myalias_2017-10-24 is part of alias myalias remove or modify the alias before 
removing this collection.

Stack Trace:
org.apache.solr.client.solrj.impl.HttpSolrClient$RemoteSolrException: Error 
from server at https://127.0.0.1:46159/solr: Collection : myalias_2017-10-24 is 
part of alias myalias remove or modify the alias before removing this 
collection.
at 
__randomizedtesting.SeedInfo.seed([387666E7C83A742F:57F32DBF744C47C0]:0)
at 
org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:643)
at 
org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:255)
at 
org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:244)
at 
org.apache.solr.client.solrj.impl.LBHttpSolrClient.doRequest(LBHttpSolrClient.java:483)
at 
org.apache.solr.client.solrj.impl.LBHttpSolrClient.request(LBHttpSolrClient.java:413)
at 
org.apache.solr.client.solrj.impl.CloudSolrClient.sendRequest(CloudSolrClient.java:1104)
at 
org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:884)
at 
org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:817)
at 
org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:194)
at 
org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:211)
at 
org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessorTest.doBefore(TimeRoutedAliasUpdateProcessorTest.java:84)
at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:564)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:968)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 

[JENKINS] Lucene-Solr-7.x-MacOSX (64bit/jdk-9) - Build # 392 - Still Unstable!

2018-01-10 Thread Policeman Jenkins Server
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-MacOSX/392/
Java: 64bit/jdk-9 -XX:-UseCompressedOops -XX:+UseParallelGC

5 tests failed.
FAILED:  org.apache.solr.cloud.CollectionsAPISolrJTest.testBalanceShardUnique

Error Message:
org/apache/solr/client/solrj/request/CollectionAdminRequest$BalanceShardUnique

Stack Trace:
java.lang.NoClassDefFoundError: 
org/apache/solr/client/solrj/request/CollectionAdminRequest$BalanceShardUnique
at 
__randomizedtesting.SeedInfo.seed([FDF6684DFDB9EF2E:B54EDE00471947ED]:0)
at 
org.apache.solr.client.solrj.request.CollectionAdminRequest.balanceReplicaProperty(CollectionAdminRequest.java:2043)
at 
org.apache.solr.cloud.CollectionsAPISolrJTest.testBalanceShardUnique(CollectionsAPISolrJTest.java:391)
at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:564)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.base/java.lang.Thread.run(Thread.java:844)
Caused by: java.lang.ClassNotFoundException: 
org.apache.solr.client.solrj.request.CollectionAdminRequest$BalanceShardUnique
at 

[jira] [Commented] (LUCENE-8126) Spatial prefix tree based on S2 geometry

2018-01-10 Thread ASF GitHub Bot (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-8126?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16320916#comment-16320916
 ] 

ASF GitHub Bot commented on LUCENE-8126:


Github user dsmiley commented on a diff in the pull request:

https://github.com/apache/lucene-solr/pull/302#discussion_r160769685
  
--- Diff: 
lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTreeCell.java
 ---
@@ -0,0 +1,285 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.spatial.prefix.tree;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import com.google.common.geometry.S2CellId;
+import org.apache.lucene.util.BytesRef;
+import org.locationtech.spatial4j.shape.Shape;
+import org.locationtech.spatial4j.shape.SpatialRelation;
+
+/**
+ * This class represents a S2 pixel in the RPT.
+ *
+ * @lucene.internal
+ */
+class S2PrefixTreeCell implements Cell {
+
+//Faces of S2 Geometry
+private static S2CellId[] FACES = new S2CellId[6];
+static {
+FACES[0] = S2CellId.fromFacePosLevel(0, 0, 0);
+FACES[1] = S2CellId.fromFacePosLevel(1, 0, 0);
+FACES[2] = S2CellId.fromFacePosLevel(2, 0, 0);
+FACES[3] = S2CellId.fromFacePosLevel(3, 0, 0);
+FACES[4] = S2CellId.fromFacePosLevel(4, 0, 0);
+FACES[5] = S2CellId.fromFacePosLevel(5, 0, 0);
+}
+
+/*Special character to define a cell leaf*/
+private static final byte LEAF = '+';
+
+/*Tokens are used to serialize cells*/
+private static final byte[] TOKENS;
+/*Map containing mapping between tokens and integer values*/
+private static final Map PIXELS;
+static {
+TOKENS = new byte[]{'0', '1', '2', '3', '4', '5'};
+PIXELS = new HashMap<>(6);
+PIXELS.put(TOKENS[0], 0);
+PIXELS.put(TOKENS[1], 1);
+PIXELS.put(TOKENS[2], 2);
+PIXELS.put(TOKENS[3], 3);
+PIXELS.put(TOKENS[4], 4);
+PIXELS.put(TOKENS[5], 5);
+}
+
+S2CellId cellId;
+int level; //cache level
+S2PrefixTree tree;
+
+SpatialRelation shapeRel= null;
+boolean isLeaf;
+Shape shape = null;
+
+S2PrefixTreeCell(S2PrefixTree tree, S2CellId cellId){
+this.cellId= cellId;
+this.tree = tree;
+setLevel();
+if (getLevel() == tree.getMaxLevels()) {
+setLeaf();
+}
+}
+
+void readCell(S2PrefixTree tree, BytesRef ref){
+isLeaf = false;
+shape = null;
+shapeRel = null;
+this.tree = tree;
+cellId = getS2CellIdFromBytesRef(ref);
+setLevel();
+if (isLeaf(ref) || getLevel() == tree.getMaxLevels()){
+setLeaf();
+}
+}
+
+@Override
+public SpatialRelation getShapeRel() {
+return shapeRel;
+}
+
+@Override
+public void setShapeRel(SpatialRelation rel) {
+shapeRel = rel;
+}
+
+@Override
+public boolean isLeaf() {
+return isLeaf;
+}
+
+@Override
+public void setLeaf() {
+isLeaf = true;
+}
+
+@Override
+public BytesRef getTokenBytesWithLeaf(BytesRef result) {
+result = getTokenBytesNoLeaf(result);
+//max levels do not have leaf
+if (isLeaf() && !(getLevel() == tree.getMaxLevels())){
+//Add leaf byte
+result.bytes[result.offset + result.length] = LEAF;
+result.length++;
+}
+return result;
+}
+
+@Override
+public BytesRef getTokenBytesNoLeaf(BytesRef result) {
+if (result == null){
+result = 

[jira] [Commented] (LUCENE-8126) Spatial prefix tree based on S2 geometry

2018-01-10 Thread ASF GitHub Bot (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-8126?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16320915#comment-16320915
 ] 

ASF GitHub Bot commented on LUCENE-8126:


Github user dsmiley commented on a diff in the pull request:

https://github.com/apache/lucene-solr/pull/302#discussion_r160769274
  
--- Diff: 
lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTreeCell.java
 ---
@@ -0,0 +1,285 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.spatial.prefix.tree;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import com.google.common.geometry.S2CellId;
+import org.apache.lucene.util.BytesRef;
+import org.locationtech.spatial4j.shape.Shape;
+import org.locationtech.spatial4j.shape.SpatialRelation;
+
+/**
+ * This class represents a S2 pixel in the RPT.
+ *
+ * @lucene.internal
+ */
+class S2PrefixTreeCell implements Cell {
+
+//Faces of S2 Geometry
+private static S2CellId[] FACES = new S2CellId[6];
+static {
+FACES[0] = S2CellId.fromFacePosLevel(0, 0, 0);
+FACES[1] = S2CellId.fromFacePosLevel(1, 0, 0);
+FACES[2] = S2CellId.fromFacePosLevel(2, 0, 0);
+FACES[3] = S2CellId.fromFacePosLevel(3, 0, 0);
+FACES[4] = S2CellId.fromFacePosLevel(4, 0, 0);
+FACES[5] = S2CellId.fromFacePosLevel(5, 0, 0);
+}
+
+/*Special character to define a cell leaf*/
+private static final byte LEAF = '+';
+
+/*Tokens are used to serialize cells*/
+private static final byte[] TOKENS;
+/*Map containing mapping between tokens and integer values*/
+private static final Map PIXELS;
+static {
+TOKENS = new byte[]{'0', '1', '2', '3', '4', '5'};
+PIXELS = new HashMap<>(6);
+PIXELS.put(TOKENS[0], 0);
+PIXELS.put(TOKENS[1], 1);
+PIXELS.put(TOKENS[2], 2);
+PIXELS.put(TOKENS[3], 3);
+PIXELS.put(TOKENS[4], 4);
+PIXELS.put(TOKENS[5], 5);
+}
+
+S2CellId cellId;
+int level; //cache level
+S2PrefixTree tree;
+
+SpatialRelation shapeRel= null;
+boolean isLeaf;
+Shape shape = null;
+
+S2PrefixTreeCell(S2PrefixTree tree, S2CellId cellId){
+this.cellId= cellId;
+this.tree = tree;
+setLevel();
+if (getLevel() == tree.getMaxLevels()) {
+setLeaf();
+}
+}
+
+void readCell(S2PrefixTree tree, BytesRef ref){
+isLeaf = false;
+shape = null;
+shapeRel = null;
+this.tree = tree;
+cellId = getS2CellIdFromBytesRef(ref);
+setLevel();
+if (isLeaf(ref) || getLevel() == tree.getMaxLevels()){
+setLeaf();
+}
+}
+
+@Override
+public SpatialRelation getShapeRel() {
+return shapeRel;
+}
+
+@Override
+public void setShapeRel(SpatialRelation rel) {
+shapeRel = rel;
+}
+
+@Override
+public boolean isLeaf() {
+return isLeaf;
+}
+
+@Override
+public void setLeaf() {
+isLeaf = true;
+}
+
+@Override
+public BytesRef getTokenBytesWithLeaf(BytesRef result) {
+result = getTokenBytesNoLeaf(result);
+//max levels do not have leaf
+if (isLeaf() && !(getLevel() == tree.getMaxLevels())){
+//Add leaf byte
+result.bytes[result.offset + result.length] = LEAF;
+result.length++;
+}
+return result;
+}
+
+@Override
+public BytesRef getTokenBytesNoLeaf(BytesRef result) {
+if (result == null){
+result = 

[jira] [Commented] (LUCENE-8126) Spatial prefix tree based on S2 geometry

2018-01-10 Thread ASF GitHub Bot (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-8126?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16320909#comment-16320909
 ] 

ASF GitHub Bot commented on LUCENE-8126:


Github user dsmiley commented on a diff in the pull request:

https://github.com/apache/lucene-solr/pull/302#discussion_r160766117
  
--- Diff: 
lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTreeCell.java
 ---
@@ -0,0 +1,285 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.spatial.prefix.tree;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import com.google.common.geometry.S2CellId;
+import org.apache.lucene.util.BytesRef;
+import org.locationtech.spatial4j.shape.Shape;
+import org.locationtech.spatial4j.shape.SpatialRelation;
+
+/**
+ * This class represents a S2 pixel in the RPT.
+ *
+ * @lucene.internal
+ */
+class S2PrefixTreeCell implements Cell {
+
+//Faces of S2 Geometry
+private static S2CellId[] FACES = new S2CellId[6];
+static {
+FACES[0] = S2CellId.fromFacePosLevel(0, 0, 0);
+FACES[1] = S2CellId.fromFacePosLevel(1, 0, 0);
+FACES[2] = S2CellId.fromFacePosLevel(2, 0, 0);
+FACES[3] = S2CellId.fromFacePosLevel(3, 0, 0);
+FACES[4] = S2CellId.fromFacePosLevel(4, 0, 0);
+FACES[5] = S2CellId.fromFacePosLevel(5, 0, 0);
+}
+
+/*Special character to define a cell leaf*/
+private static final byte LEAF = '+';
+
+/*Tokens are used to serialize cells*/
+private static final byte[] TOKENS;
+/*Map containing mapping between tokens and integer values*/
+private static final Map PIXELS;
+static {
+TOKENS = new byte[]{'0', '1', '2', '3', '4', '5'};
+PIXELS = new HashMap<>(6);
+PIXELS.put(TOKENS[0], 0);
+PIXELS.put(TOKENS[1], 1);
+PIXELS.put(TOKENS[2], 2);
+PIXELS.put(TOKENS[3], 3);
+PIXELS.put(TOKENS[4], 4);
+PIXELS.put(TOKENS[5], 5);
+}
+
+S2CellId cellId;
+int level; //cache level
+S2PrefixTree tree;
+
+SpatialRelation shapeRel= null;
+boolean isLeaf;
+Shape shape = null;
+
+S2PrefixTreeCell(S2PrefixTree tree, S2CellId cellId){
+this.cellId= cellId;
+this.tree = tree;
+setLevel();
+if (getLevel() == tree.getMaxLevels()) {
+setLeaf();
+}
+}
+
+void readCell(S2PrefixTree tree, BytesRef ref){
+isLeaf = false;
+shape = null;
+shapeRel = null;
+this.tree = tree;
+cellId = getS2CellIdFromBytesRef(ref);
+setLevel();
+if (isLeaf(ref) || getLevel() == tree.getMaxLevels()){
+setLeaf();
+}
+}
+
+@Override
+public SpatialRelation getShapeRel() {
+return shapeRel;
+}
+
+@Override
+public void setShapeRel(SpatialRelation rel) {
+shapeRel = rel;
+}
+
+@Override
+public boolean isLeaf() {
+return isLeaf;
+}
+
+@Override
+public void setLeaf() {
+isLeaf = true;
+}
+
+@Override
+public BytesRef getTokenBytesWithLeaf(BytesRef result) {
+result = getTokenBytesNoLeaf(result);
+//max levels do not have leaf
+if (isLeaf() && !(getLevel() == tree.getMaxLevels())){
+//Add leaf byte
+result.bytes[result.offset + result.length] = LEAF;
+result.length++;
+}
+return result;
+}
+
+@Override
+public BytesRef getTokenBytesNoLeaf(BytesRef result) {
+if (result == null){
+result = 

[jira] [Commented] (LUCENE-8126) Spatial prefix tree based on S2 geometry

2018-01-10 Thread ASF GitHub Bot (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-8126?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16320913#comment-16320913
 ] 

ASF GitHub Bot commented on LUCENE-8126:


Github user dsmiley commented on a diff in the pull request:

https://github.com/apache/lucene-solr/pull/302#discussion_r160768405
  
--- Diff: 
lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTreeCell.java
 ---
@@ -0,0 +1,285 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.spatial.prefix.tree;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import com.google.common.geometry.S2CellId;
+import org.apache.lucene.util.BytesRef;
+import org.locationtech.spatial4j.shape.Shape;
+import org.locationtech.spatial4j.shape.SpatialRelation;
+
+/**
+ * This class represents a S2 pixel in the RPT.
+ *
+ * @lucene.internal
+ */
+class S2PrefixTreeCell implements Cell {
+
+//Faces of S2 Geometry
+private static S2CellId[] FACES = new S2CellId[6];
+static {
+FACES[0] = S2CellId.fromFacePosLevel(0, 0, 0);
+FACES[1] = S2CellId.fromFacePosLevel(1, 0, 0);
+FACES[2] = S2CellId.fromFacePosLevel(2, 0, 0);
+FACES[3] = S2CellId.fromFacePosLevel(3, 0, 0);
+FACES[4] = S2CellId.fromFacePosLevel(4, 0, 0);
+FACES[5] = S2CellId.fromFacePosLevel(5, 0, 0);
+}
+
+/*Special character to define a cell leaf*/
+private static final byte LEAF = '+';
+
+/*Tokens are used to serialize cells*/
+private static final byte[] TOKENS;
+/*Map containing mapping between tokens and integer values*/
+private static final Map PIXELS;
+static {
+TOKENS = new byte[]{'0', '1', '2', '3', '4', '5'};
+PIXELS = new HashMap<>(6);
+PIXELS.put(TOKENS[0], 0);
+PIXELS.put(TOKENS[1], 1);
+PIXELS.put(TOKENS[2], 2);
+PIXELS.put(TOKENS[3], 3);
+PIXELS.put(TOKENS[4], 4);
+PIXELS.put(TOKENS[5], 5);
+}
+
+S2CellId cellId;
+int level; //cache level
+S2PrefixTree tree;
+
+SpatialRelation shapeRel= null;
+boolean isLeaf;
+Shape shape = null;
+
+S2PrefixTreeCell(S2PrefixTree tree, S2CellId cellId){
+this.cellId= cellId;
+this.tree = tree;
+setLevel();
+if (getLevel() == tree.getMaxLevels()) {
+setLeaf();
+}
+}
+
+void readCell(S2PrefixTree tree, BytesRef ref){
+isLeaf = false;
+shape = null;
+shapeRel = null;
+this.tree = tree;
+cellId = getS2CellIdFromBytesRef(ref);
+setLevel();
+if (isLeaf(ref) || getLevel() == tree.getMaxLevels()){
+setLeaf();
+}
+}
+
+@Override
+public SpatialRelation getShapeRel() {
+return shapeRel;
+}
+
+@Override
+public void setShapeRel(SpatialRelation rel) {
+shapeRel = rel;
+}
+
+@Override
+public boolean isLeaf() {
+return isLeaf;
+}
+
+@Override
+public void setLeaf() {
+isLeaf = true;
+}
+
+@Override
+public BytesRef getTokenBytesWithLeaf(BytesRef result) {
+result = getTokenBytesNoLeaf(result);
+//max levels do not have leaf
+if (isLeaf() && !(getLevel() == tree.getMaxLevels())){
+//Add leaf byte
+result.bytes[result.offset + result.length] = LEAF;
+result.length++;
+}
+return result;
+}
+
+@Override
+public BytesRef getTokenBytesNoLeaf(BytesRef result) {
+if (result == null){
+result = 

[jira] [Commented] (LUCENE-8126) Spatial prefix tree based on S2 geometry

2018-01-10 Thread ASF GitHub Bot (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-8126?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16320914#comment-16320914
 ] 

ASF GitHub Bot commented on LUCENE-8126:


Github user dsmiley commented on a diff in the pull request:

https://github.com/apache/lucene-solr/pull/302#discussion_r160773175
  
--- Diff: 
lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTree.java
 ---
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.spatial.prefix.tree;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import com.google.common.geometry.S2CellId;
+import com.google.common.geometry.S2LatLng;
+import com.google.common.geometry.S2Projections;
+import org.apache.lucene.util.BytesRef;
+import org.locationtech.spatial4j.context.SpatialContext;
+import org.locationtech.spatial4j.distance.DistanceUtils;
+import org.locationtech.spatial4j.shape.Point;
+import org.locationtech.spatial4j.shape.Shape;
+
+/**
+ * Spatial prefix tree for S2 Geometry. Shape factories for the given 
{@link SpatialContext} must
+ * implement the interface {@link S2ShapeFactory}.
+ *
+ * @lucene.experimental
+ */
+public class S2PrefixTree extends SpatialPrefixTree {
+
+/**
+ * Factory for creating {@link S2PrefixTree} instances with useful 
defaults
+ */
+public static class Factory extends SpatialPrefixTreeFactory {
+
+@Override
+protected int getLevelForDistance(double degrees) {
+S2PrefixTree grid = new S2PrefixTree(ctx, 
S2PrefixTree.MAX_LEVELS);
+return grid.getLevelForDistance(degrees);
+}
+
+@Override
+protected SpatialPrefixTree newSPT() {
+return new S2PrefixTree(ctx,
+maxLevels != null ? maxLevels : S2PrefixTree.MAX_LEVELS);
+}
+}
+
+//factory to generate S2 cell shapes
+protected final S2ShapeFactory s2ShapeFactory;
+public static final int MAX_LEVELS = S2CellId.MAX_LEVEL + 1;
+
+public S2PrefixTree(SpatialContext ctx, int maxLevels) {
+super(ctx, maxLevels);
+if (!(ctx.getShapeFactory() instanceof S2ShapeFactory)) {
+throw new IllegalArgumentException("Spatial context does not 
support S2 spatial index.");
+}
+this.s2ShapeFactory = (S2ShapeFactory) ctx.getShapeFactory();
+}
+
+@Override
+public int getLevelForDistance(double dist) {
+if (dist ==0){
+return maxLevels;
+}
+return Math.min(maxLevels, 
S2Projections.MAX_WIDTH.getClosestLevel(dist * 
DistanceUtils.DEGREES_TO_RADIANS) +1);
+}
+
+@Override
+public double getDistanceForLevel(int level) {
+return S2Projections.MAX_WIDTH.getValue(level -1) * 
DistanceUtils.RADIANS_TO_DEGREES;
--- End diff --

nitpick: put space after that minus operator


> Spatial prefix tree based on S2 geometry
> 
>
> Key: LUCENE-8126
> URL: https://issues.apache.org/jira/browse/LUCENE-8126
> Project: Lucene - Core
>  Issue Type: New Feature
>  Components: modules/spatial-extras
>Reporter: Ignacio Vera
>
> Hi [~dsmiley],
> I have been working on a prefix tree based on Google S2 geometry 
> (https://s2geometry.io/) to be used mainly with Geo3d shapes with very 
> promising results, in particular for complex shapes (e.g. polygons). Using 
> this pixelization scheme reduces the size of the index, improves the 
> performance of the queries and reduces the loading time for non-point shapes. 
> If you are ok with this contribution and before providing any code I would 
> like to understand what is the correct/preferred approach:
> 1) Add new dependency to the S2 library 
> 

[jira] [Commented] (LUCENE-8126) Spatial prefix tree based on S2 geometry

2018-01-10 Thread ASF GitHub Bot (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-8126?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16320917#comment-16320917
 ] 

ASF GitHub Bot commented on LUCENE-8126:


Github user dsmiley commented on a diff in the pull request:

https://github.com/apache/lucene-solr/pull/302#discussion_r160773587
  
--- Diff: 
lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTree.java
 ---
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.spatial.prefix.tree;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import com.google.common.geometry.S2CellId;
+import com.google.common.geometry.S2LatLng;
+import com.google.common.geometry.S2Projections;
+import org.apache.lucene.util.BytesRef;
+import org.locationtech.spatial4j.context.SpatialContext;
+import org.locationtech.spatial4j.distance.DistanceUtils;
+import org.locationtech.spatial4j.shape.Point;
+import org.locationtech.spatial4j.shape.Shape;
+
+/**
+ * Spatial prefix tree for S2 Geometry. Shape factories for the given 
{@link SpatialContext} must
+ * implement the interface {@link S2ShapeFactory}.
+ *
+ * @lucene.experimental
+ */
+public class S2PrefixTree extends SpatialPrefixTree {
+
+/**
+ * Factory for creating {@link S2PrefixTree} instances with useful 
defaults
+ */
+public static class Factory extends SpatialPrefixTreeFactory {
+
+@Override
+protected int getLevelForDistance(double degrees) {
+S2PrefixTree grid = new S2PrefixTree(ctx, 
S2PrefixTree.MAX_LEVELS);
+return grid.getLevelForDistance(degrees);
+}
+
+@Override
+protected SpatialPrefixTree newSPT() {
+return new S2PrefixTree(ctx,
+maxLevels != null ? maxLevels : S2PrefixTree.MAX_LEVELS);
+}
+}
+
+//factory to generate S2 cell shapes
+protected final S2ShapeFactory s2ShapeFactory;
+public static final int MAX_LEVELS = S2CellId.MAX_LEVEL + 1;
+
+public S2PrefixTree(SpatialContext ctx, int maxLevels) {
+super(ctx, maxLevels);
+if (!(ctx.getShapeFactory() instanceof S2ShapeFactory)) {
+throw new IllegalArgumentException("Spatial context does not 
support S2 spatial index.");
+}
+this.s2ShapeFactory = (S2ShapeFactory) ctx.getShapeFactory();
+}
+
+@Override
+public int getLevelForDistance(double dist) {
+if (dist ==0){
+return maxLevels;
+}
+return Math.min(maxLevels, 
S2Projections.MAX_WIDTH.getClosestLevel(dist * 
DistanceUtils.DEGREES_TO_RADIANS) +1);
+}
+
+@Override
+public double getDistanceForLevel(int level) {
+return S2Projections.MAX_WIDTH.getValue(level -1) * 
DistanceUtils.RADIANS_TO_DEGREES;
+}
+
+@Override
+public Cell getWorldCell() {
+return  new S2PrefixTreeCell(this, null);
+}
+
+@Override
+public Cell readCell(BytesRef term, Cell scratch) {
+S2PrefixTreeCell cell = (S2PrefixTreeCell) scratch;
+if (cell == null)
+cell = (S2PrefixTreeCell) getWorldCell();
+cell.readCell(this, term);
+return cell;
+}
+
+@Override
+public CellIterator getTreeCellIterator(Shape shape, int detailLevel) {
+if (!(shape instanceof Point)) {
+return  super.getTreeCellIterator(shape, detailLevel);
+}
+Point p = (Point) shape;
+S2CellId id = S2CellId.fromLatLng(S2LatLng.fromDegrees(p.getY(), 
p.getX())).parent(detailLevel-1);
+List cells = new ArrayList<>(detailLevel);
+for (int i=0; i < detailLevel -1; i++) {
--- End diff --

nitpick: put a space after that minus operator


> Spatial 

[jira] [Commented] (LUCENE-8126) Spatial prefix tree based on S2 geometry

2018-01-10 Thread ASF GitHub Bot (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-8126?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16320912#comment-16320912
 ] 

ASF GitHub Bot commented on LUCENE-8126:


Github user dsmiley commented on a diff in the pull request:

https://github.com/apache/lucene-solr/pull/302#discussion_r160768053
  
--- Diff: 
lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTreeCell.java
 ---
@@ -0,0 +1,285 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.spatial.prefix.tree;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import com.google.common.geometry.S2CellId;
+import org.apache.lucene.util.BytesRef;
+import org.locationtech.spatial4j.shape.Shape;
+import org.locationtech.spatial4j.shape.SpatialRelation;
+
+/**
+ * This class represents a S2 pixel in the RPT.
+ *
+ * @lucene.internal
+ */
+class S2PrefixTreeCell implements Cell {
+
+//Faces of S2 Geometry
+private static S2CellId[] FACES = new S2CellId[6];
+static {
+FACES[0] = S2CellId.fromFacePosLevel(0, 0, 0);
+FACES[1] = S2CellId.fromFacePosLevel(1, 0, 0);
+FACES[2] = S2CellId.fromFacePosLevel(2, 0, 0);
+FACES[3] = S2CellId.fromFacePosLevel(3, 0, 0);
+FACES[4] = S2CellId.fromFacePosLevel(4, 0, 0);
+FACES[5] = S2CellId.fromFacePosLevel(5, 0, 0);
+}
+
+/*Special character to define a cell leaf*/
+private static final byte LEAF = '+';
+
+/*Tokens are used to serialize cells*/
+private static final byte[] TOKENS;
+/*Map containing mapping between tokens and integer values*/
+private static final Map PIXELS;
--- End diff --

Since this map has a small set of fixed values that have numeric 
equivalents, perhaps we can do direct addressing into an array?


> Spatial prefix tree based on S2 geometry
> 
>
> Key: LUCENE-8126
> URL: https://issues.apache.org/jira/browse/LUCENE-8126
> Project: Lucene - Core
>  Issue Type: New Feature
>  Components: modules/spatial-extras
>Reporter: Ignacio Vera
>
> Hi [~dsmiley],
> I have been working on a prefix tree based on Google S2 geometry 
> (https://s2geometry.io/) to be used mainly with Geo3d shapes with very 
> promising results, in particular for complex shapes (e.g. polygons). Using 
> this pixelization scheme reduces the size of the index, improves the 
> performance of the queries and reduces the loading time for non-point shapes. 
> If you are ok with this contribution and before providing any code I would 
> like to understand what is the correct/preferred approach:
> 1) Add new dependency to the S2 library 
> (https://mvnrepository.com/artifact/io.sgr/s2-geometry-library-java). It has 
> Apache 2.0 license so it should be ok.
> 2) Create a utility class with all methods necessary to navigate the S2 tree 
> and create shapes from S2 cells (basically port what we need from the library 
> into Lucene).
> What do you think?



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

-
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org



[jira] [Commented] (LUCENE-8126) Spatial prefix tree based on S2 geometry

2018-01-10 Thread ASF GitHub Bot (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-8126?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16320910#comment-16320910
 ] 

ASF GitHub Bot commented on LUCENE-8126:


Github user dsmiley commented on a diff in the pull request:

https://github.com/apache/lucene-solr/pull/302#discussion_r160768230
  
--- Diff: 
lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTree.java
 ---
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.spatial.prefix.tree;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import com.google.common.geometry.S2CellId;
+import com.google.common.geometry.S2LatLng;
+import com.google.common.geometry.S2Projections;
+import org.apache.lucene.util.BytesRef;
+import org.locationtech.spatial4j.context.SpatialContext;
+import org.locationtech.spatial4j.distance.DistanceUtils;
+import org.locationtech.spatial4j.shape.Point;
+import org.locationtech.spatial4j.shape.Shape;
+
+/**
+ * Spatial prefix tree for S2 Geometry. Shape factories for the given 
{@link SpatialContext} must
+ * implement the interface {@link S2ShapeFactory}.
+ *
+ * @lucene.experimental
+ */
+public class S2PrefixTree extends SpatialPrefixTree {
+
+/**
+ * Factory for creating {@link S2PrefixTree} instances with useful 
defaults
+ */
+public static class Factory extends SpatialPrefixTreeFactory {
+
+@Override
+protected int getLevelForDistance(double degrees) {
+S2PrefixTree grid = new S2PrefixTree(ctx, 
S2PrefixTree.MAX_LEVELS);
+return grid.getLevelForDistance(degrees);
+}
+
+@Override
+protected SpatialPrefixTree newSPT() {
+return new S2PrefixTree(ctx,
+maxLevels != null ? maxLevels : S2PrefixTree.MAX_LEVELS);
+}
+}
+
+//factory to generate S2 cell shapes
+protected final S2ShapeFactory s2ShapeFactory;
+public static final int MAX_LEVELS = S2CellId.MAX_LEVEL + 1;
+
+public S2PrefixTree(SpatialContext ctx, int maxLevels) {
+super(ctx, maxLevels);
+if (!(ctx.getShapeFactory() instanceof S2ShapeFactory)) {
+throw new IllegalArgumentException("Spatial context does not 
support S2 spatial index.");
+}
+this.s2ShapeFactory = (S2ShapeFactory) ctx.getShapeFactory();
+}
+
+@Override
+public int getLevelForDistance(double dist) {
+if (dist ==0){
+return maxLevels;
+}
+return Math.min(maxLevels, 
S2Projections.MAX_WIDTH.getClosestLevel(dist * 
DistanceUtils.DEGREES_TO_RADIANS) +1);
+}
+
+@Override
+public double getDistanceForLevel(int level) {
+return S2Projections.MAX_WIDTH.getValue(level -1) * 
DistanceUtils.RADIANS_TO_DEGREES;
+}
+
+@Override
+public Cell getWorldCell() {
+return  new S2PrefixTreeCell(this, null);
+}
+
+@Override
+public Cell readCell(BytesRef term, Cell scratch) {
+S2PrefixTreeCell cell = (S2PrefixTreeCell) scratch;
+if (cell == null)
+cell = (S2PrefixTreeCell) getWorldCell();
--- End diff --

nitpick: our code style in Lucene/Solr is to always use braces


> Spatial prefix tree based on S2 geometry
> 
>
> Key: LUCENE-8126
> URL: https://issues.apache.org/jira/browse/LUCENE-8126
> Project: Lucene - Core
>  Issue Type: New Feature
>  Components: modules/spatial-extras
>Reporter: Ignacio Vera
>
> Hi [~dsmiley],
> I have been working on a prefix tree based on Google S2 geometry 
> (https://s2geometry.io/) to be used mainly with Geo3d shapes with very 
> promising results, in particular for complex shapes (e.g 

[jira] [Commented] (LUCENE-8126) Spatial prefix tree based on S2 geometry

2018-01-10 Thread ASF GitHub Bot (JIRA)

[ 
https://issues.apache.org/jira/browse/LUCENE-8126?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16320911#comment-16320911
 ] 

ASF GitHub Bot commented on LUCENE-8126:


Github user dsmiley commented on a diff in the pull request:

https://github.com/apache/lucene-solr/pull/302#discussion_r160766597
  
--- Diff: 
lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTreeCell.java
 ---
@@ -0,0 +1,285 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.spatial.prefix.tree;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import com.google.common.geometry.S2CellId;
+import org.apache.lucene.util.BytesRef;
+import org.locationtech.spatial4j.shape.Shape;
+import org.locationtech.spatial4j.shape.SpatialRelation;
+
+/**
+ * This class represents a S2 pixel in the RPT.
+ *
+ * @lucene.internal
+ */
+class S2PrefixTreeCell implements Cell {
+
+//Faces of S2 Geometry
+private static S2CellId[] FACES = new S2CellId[6];
+static {
+FACES[0] = S2CellId.fromFacePosLevel(0, 0, 0);
+FACES[1] = S2CellId.fromFacePosLevel(1, 0, 0);
+FACES[2] = S2CellId.fromFacePosLevel(2, 0, 0);
+FACES[3] = S2CellId.fromFacePosLevel(3, 0, 0);
+FACES[4] = S2CellId.fromFacePosLevel(4, 0, 0);
+FACES[5] = S2CellId.fromFacePosLevel(5, 0, 0);
+}
+
+/*Special character to define a cell leaf*/
+private static final byte LEAF = '+';
+
+/*Tokens are used to serialize cells*/
+private static final byte[] TOKENS;
+/*Map containing mapping between tokens and integer values*/
+private static final Map PIXELS;
+static {
+TOKENS = new byte[]{'0', '1', '2', '3', '4', '5'};
+PIXELS = new HashMap<>(6);
+PIXELS.put(TOKENS[0], 0);
+PIXELS.put(TOKENS[1], 1);
+PIXELS.put(TOKENS[2], 2);
+PIXELS.put(TOKENS[3], 3);
+PIXELS.put(TOKENS[4], 4);
+PIXELS.put(TOKENS[5], 5);
+}
+
+S2CellId cellId;
+int level; //cache level
+S2PrefixTree tree;
+
+SpatialRelation shapeRel= null;
+boolean isLeaf;
+Shape shape = null;
+
+S2PrefixTreeCell(S2PrefixTree tree, S2CellId cellId){
+this.cellId= cellId;
+this.tree = tree;
+setLevel();
+if (getLevel() == tree.getMaxLevels()) {
+setLeaf();
+}
+}
+
+void readCell(S2PrefixTree tree, BytesRef ref){
+isLeaf = false;
+shape = null;
+shapeRel = null;
+this.tree = tree;
+cellId = getS2CellIdFromBytesRef(ref);
+setLevel();
+if (isLeaf(ref) || getLevel() == tree.getMaxLevels()){
+setLeaf();
+}
+}
+
+@Override
+public SpatialRelation getShapeRel() {
+return shapeRel;
+}
+
+@Override
+public void setShapeRel(SpatialRelation rel) {
+shapeRel = rel;
+}
+
+@Override
+public boolean isLeaf() {
+return isLeaf;
+}
+
+@Override
+public void setLeaf() {
+isLeaf = true;
+}
+
+@Override
+public BytesRef getTokenBytesWithLeaf(BytesRef result) {
+result = getTokenBytesNoLeaf(result);
+//max levels do not have leaf
+if (isLeaf() && !(getLevel() == tree.getMaxLevels())){
+//Add leaf byte
+result.bytes[result.offset + result.length] = LEAF;
+result.length++;
+}
+return result;
+}
+
+@Override
+public BytesRef getTokenBytesNoLeaf(BytesRef result) {
+if (result == null){
+result = 

[GitHub] lucene-solr pull request #302: LUCENE-8126: Spatial prefix tree based on S2 ...

2018-01-10 Thread dsmiley
Github user dsmiley commented on a diff in the pull request:

https://github.com/apache/lucene-solr/pull/302#discussion_r160769685
  
--- Diff: 
lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTreeCell.java
 ---
@@ -0,0 +1,285 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.spatial.prefix.tree;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import com.google.common.geometry.S2CellId;
+import org.apache.lucene.util.BytesRef;
+import org.locationtech.spatial4j.shape.Shape;
+import org.locationtech.spatial4j.shape.SpatialRelation;
+
+/**
+ * This class represents a S2 pixel in the RPT.
+ *
+ * @lucene.internal
+ */
+class S2PrefixTreeCell implements Cell {
+
+//Faces of S2 Geometry
+private static S2CellId[] FACES = new S2CellId[6];
+static {
+FACES[0] = S2CellId.fromFacePosLevel(0, 0, 0);
+FACES[1] = S2CellId.fromFacePosLevel(1, 0, 0);
+FACES[2] = S2CellId.fromFacePosLevel(2, 0, 0);
+FACES[3] = S2CellId.fromFacePosLevel(3, 0, 0);
+FACES[4] = S2CellId.fromFacePosLevel(4, 0, 0);
+FACES[5] = S2CellId.fromFacePosLevel(5, 0, 0);
+}
+
+/*Special character to define a cell leaf*/
+private static final byte LEAF = '+';
+
+/*Tokens are used to serialize cells*/
+private static final byte[] TOKENS;
+/*Map containing mapping between tokens and integer values*/
+private static final Map PIXELS;
+static {
+TOKENS = new byte[]{'0', '1', '2', '3', '4', '5'};
+PIXELS = new HashMap<>(6);
+PIXELS.put(TOKENS[0], 0);
+PIXELS.put(TOKENS[1], 1);
+PIXELS.put(TOKENS[2], 2);
+PIXELS.put(TOKENS[3], 3);
+PIXELS.put(TOKENS[4], 4);
+PIXELS.put(TOKENS[5], 5);
+}
+
+S2CellId cellId;
+int level; //cache level
+S2PrefixTree tree;
+
+SpatialRelation shapeRel= null;
+boolean isLeaf;
+Shape shape = null;
+
+S2PrefixTreeCell(S2PrefixTree tree, S2CellId cellId){
+this.cellId= cellId;
+this.tree = tree;
+setLevel();
+if (getLevel() == tree.getMaxLevels()) {
+setLeaf();
+}
+}
+
+void readCell(S2PrefixTree tree, BytesRef ref){
+isLeaf = false;
+shape = null;
+shapeRel = null;
+this.tree = tree;
+cellId = getS2CellIdFromBytesRef(ref);
+setLevel();
+if (isLeaf(ref) || getLevel() == tree.getMaxLevels()){
+setLeaf();
+}
+}
+
+@Override
+public SpatialRelation getShapeRel() {
+return shapeRel;
+}
+
+@Override
+public void setShapeRel(SpatialRelation rel) {
+shapeRel = rel;
+}
+
+@Override
+public boolean isLeaf() {
+return isLeaf;
+}
+
+@Override
+public void setLeaf() {
+isLeaf = true;
+}
+
+@Override
+public BytesRef getTokenBytesWithLeaf(BytesRef result) {
+result = getTokenBytesNoLeaf(result);
+//max levels do not have leaf
+if (isLeaf() && !(getLevel() == tree.getMaxLevels())){
+//Add leaf byte
+result.bytes[result.offset + result.length] = LEAF;
+result.length++;
+}
+return result;
+}
+
+@Override
+public BytesRef getTokenBytesNoLeaf(BytesRef result) {
+if (result == null){
+result = new BytesRef();
+}
+getBytesRefFromS2CellId(cellId, result);
+return result;
+}
+
+@Override
+public int getLevel() {
+return this.level;
+}
+
+/**

[GitHub] lucene-solr pull request #302: LUCENE-8126: Spatial prefix tree based on S2 ...

2018-01-10 Thread dsmiley
Github user dsmiley commented on a diff in the pull request:

https://github.com/apache/lucene-solr/pull/302#discussion_r160768405
  
--- Diff: 
lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTreeCell.java
 ---
@@ -0,0 +1,285 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.spatial.prefix.tree;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import com.google.common.geometry.S2CellId;
+import org.apache.lucene.util.BytesRef;
+import org.locationtech.spatial4j.shape.Shape;
+import org.locationtech.spatial4j.shape.SpatialRelation;
+
+/**
+ * This class represents a S2 pixel in the RPT.
+ *
+ * @lucene.internal
+ */
+class S2PrefixTreeCell implements Cell {
+
+//Faces of S2 Geometry
+private static S2CellId[] FACES = new S2CellId[6];
+static {
+FACES[0] = S2CellId.fromFacePosLevel(0, 0, 0);
+FACES[1] = S2CellId.fromFacePosLevel(1, 0, 0);
+FACES[2] = S2CellId.fromFacePosLevel(2, 0, 0);
+FACES[3] = S2CellId.fromFacePosLevel(3, 0, 0);
+FACES[4] = S2CellId.fromFacePosLevel(4, 0, 0);
+FACES[5] = S2CellId.fromFacePosLevel(5, 0, 0);
+}
+
+/*Special character to define a cell leaf*/
+private static final byte LEAF = '+';
+
+/*Tokens are used to serialize cells*/
+private static final byte[] TOKENS;
+/*Map containing mapping between tokens and integer values*/
+private static final Map PIXELS;
+static {
+TOKENS = new byte[]{'0', '1', '2', '3', '4', '5'};
+PIXELS = new HashMap<>(6);
+PIXELS.put(TOKENS[0], 0);
+PIXELS.put(TOKENS[1], 1);
+PIXELS.put(TOKENS[2], 2);
+PIXELS.put(TOKENS[3], 3);
+PIXELS.put(TOKENS[4], 4);
+PIXELS.put(TOKENS[5], 5);
+}
+
+S2CellId cellId;
+int level; //cache level
+S2PrefixTree tree;
+
+SpatialRelation shapeRel= null;
+boolean isLeaf;
+Shape shape = null;
+
+S2PrefixTreeCell(S2PrefixTree tree, S2CellId cellId){
+this.cellId= cellId;
+this.tree = tree;
+setLevel();
+if (getLevel() == tree.getMaxLevels()) {
+setLeaf();
+}
+}
+
+void readCell(S2PrefixTree tree, BytesRef ref){
+isLeaf = false;
+shape = null;
+shapeRel = null;
+this.tree = tree;
+cellId = getS2CellIdFromBytesRef(ref);
+setLevel();
+if (isLeaf(ref) || getLevel() == tree.getMaxLevels()){
+setLeaf();
+}
+}
+
+@Override
+public SpatialRelation getShapeRel() {
+return shapeRel;
+}
+
+@Override
+public void setShapeRel(SpatialRelation rel) {
+shapeRel = rel;
+}
+
+@Override
+public boolean isLeaf() {
+return isLeaf;
+}
+
+@Override
+public void setLeaf() {
+isLeaf = true;
+}
+
+@Override
+public BytesRef getTokenBytesWithLeaf(BytesRef result) {
+result = getTokenBytesNoLeaf(result);
+//max levels do not have leaf
+if (isLeaf() && !(getLevel() == tree.getMaxLevels())){
+//Add leaf byte
+result.bytes[result.offset + result.length] = LEAF;
+result.length++;
+}
+return result;
+}
+
+@Override
+public BytesRef getTokenBytesNoLeaf(BytesRef result) {
+if (result == null){
+result = new BytesRef();
+}
+getBytesRefFromS2CellId(cellId, result);
+return result;
+}
+
+@Override
+public int getLevel() {
+return this.level;
+}
+
+/**

  1   2   >