[JENKINS] Lucene-Solr-NightlyTests-8.x - Build # 210 - Still Unstable
Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-8.x/210/ 1 tests failed. FAILED: junit.framework.TestSuite.org.apache.solr.cloud.hdfs.HdfsChaosMonkeySafeLeaderTest Error Message: ObjectTracker found 4 object(s) that were not released!!! [InternalHttpClient, NRTCachingDirectory, NRTCachingDirectory, SolrCore] org.apache.solr.common.util.ObjectReleaseTracker$ObjectTrackerException: org.apache.http.impl.client.InternalHttpClient at org.apache.solr.common.util.ObjectReleaseTracker.track(ObjectReleaseTracker.java:42) at org.apache.solr.client.solrj.impl.HttpClientUtil.createClient(HttpClientUtil.java:321) at org.apache.solr.client.solrj.impl.HttpClientUtil.createClient(HttpClientUtil.java:330) at org.apache.solr.handler.IndexFetcher.createHttpClient(IndexFetcher.java:230) at org.apache.solr.handler.IndexFetcher.(IndexFetcher.java:272) at org.apache.solr.handler.ReplicationHandler.doFetch(ReplicationHandler.java:420) at org.apache.solr.cloud.RecoveryStrategy.replicate(RecoveryStrategy.java:250) at org.apache.solr.cloud.RecoveryStrategy.doSyncOrReplicateRecovery(RecoveryStrategy.java:662) at org.apache.solr.cloud.RecoveryStrategy.doRecovery(RecoveryStrategy.java:336) at org.apache.solr.cloud.RecoveryStrategy.run(RecoveryStrategy.java:317) at com.codahale.metrics.InstrumentedExecutorService$InstrumentedRunnable.run(InstrumentedExecutorService.java:181) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at org.apache.solr.common.util.ExecutorUtil$MDCAwareThreadPoolExecutor.lambda$execute$0(ExecutorUtil.java:210) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) org.apache.solr.common.util.ObjectReleaseTracker$ObjectTrackerException: org.apache.lucene.store.NRTCachingDirectory at 
org.apache.solr.common.util.ObjectReleaseTracker.track(ObjectReleaseTracker.java:42) at org.apache.solr.core.CachingDirectoryFactory.get(CachingDirectoryFactory.java:348) at org.apache.solr.update.SolrIndexWriter.create(SolrIndexWriter.java:99) at org.apache.solr.core.SolrCore.initIndex(SolrCore.java:805) at org.apache.solr.core.SolrCore.(SolrCore.java:1003) at org.apache.solr.core.SolrCore.(SolrCore.java:914) at org.apache.solr.core.CoreContainer.createFromDescriptor(CoreContainer.java:1252) at org.apache.solr.core.CoreContainer.create(CoreContainer.java:1163) at org.apache.solr.handler.admin.CoreAdminOperation.lambda$static$0(CoreAdminOperation.java:92) at org.apache.solr.handler.admin.CoreAdminOperation.execute(CoreAdminOperation.java:360) at org.apache.solr.handler.admin.CoreAdminHandler$CallInfo.call(CoreAdminHandler.java:396) at org.apache.solr.handler.admin.CoreAdminHandler.handleRequestBody(CoreAdminHandler.java:180) at org.apache.solr.handler.RequestHandlerBase.handleRequest(RequestHandlerBase.java:198) at org.apache.solr.servlet.HttpSolrCall.handleAdmin(HttpSolrCall.java:820) at org.apache.solr.servlet.HttpSolrCall.handleAdminRequest(HttpSolrCall.java:786) at org.apache.solr.servlet.HttpSolrCall.call(HttpSolrCall.java:546) at org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:424) at org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:351) at org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1610) at org.apache.solr.client.solrj.embedded.JettySolrRunner$DebugFilter.doFilter(JettySolrRunner.java:167) at org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1610) at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:540) at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255) at org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1711) at 
org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255) at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1347) at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203) at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:480) at org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1678) at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201) at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1249) at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144) at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132) at org.eclipse.jetty.rewrite.handler.RewriteHandler.handle(RewriteHandler.java:335) at org.eclipse.jetty.server.handler.gzip.GzipHandler.handle(GzipHandler.java:703) at org.eclipse.jetty.server
[GitHub] [lucene-solr] eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud
eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud URL: https://github.com/apache/lucene-solr/pull/864#discussion_r324030543 ## File path: solr/core/src/java/org/apache/solr/store/blob/client/LocalStorageClient.java ## @@ -0,0 +1,259 @@ +package org.apache.solr.store.blob.client; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.PrintWriter; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.util.Collection; +import java.util.Comparator; +import java.util.List; +import java.util.stream.Collectors; + +/** + * Class that handles reads and writes of solr blob files to the local file system. + */ +public class LocalStorageClient implements CoreStorageClient { + + /** The directory on the local file system where blobs will be stored. */ + public static final String BLOB_STORE_LOCAL_FS_ROOT_DIR_PROPERTY = "blob.local.dir"; + + private final String blobStoreRootDir = System.getProperty(BLOB_STORE_LOCAL_FS_ROOT_DIR_PROPERTY, "/tmp/BlobStoreLocal/"); + + public LocalStorageClient() throws IOException { +File rootDir = new File(blobStoreRootDir); +rootDir.mkdirs(); // Might create the directory... 
or not +if (!rootDir.isDirectory()) { + throw new IOException("Can't create local Blob root directory " + rootDir.getAbsolutePath()); +} + } + + private File getCoreRootDir(String blobName) { +return new File(BlobClientUtils.concatenatePaths(blobStoreRootDir, blobName)); + } + + @Override + public String pushStream(String blobName, InputStream is, long contentLength, String fileNamePrefix) throws BlobException { +try { + createCoreStorage(blobName); + String blobPath = createNewNonExistingBlob(blobName, fileNamePrefix); + + Files.copy(is, Paths.get(getBlobAbsolutePath(blobPath)), StandardCopyOption.REPLACE_EXISTING); + + assert new File(getBlobAbsolutePath(blobPath)).length() == contentLength; + + return blobPath; +} catch (Exception ex) { + throw new BlobException(ex); +} + } + + /** + * Picks a unique name for a new blob for the given core. + * The current implementation creates a file, but eventually we just pick up a random blob name then delegate to S3... + * @return the blob file name, including the "path" part of the name + */ + private String createNewNonExistingBlob(String blobName, String fileNamePrefix) throws BlobException { +try { + String blobPath = BlobClientUtils.generateNewBlobCorePath(blobName, fileNamePrefix); + final File blobFile = new File(getBlobAbsolutePath(blobPath)); + if (blobFile.exists()) { +// Not expecting this ever to happen. In theory we could just do "continue" here to try a new +// name. For now throwing an exception to make sure we don't run into this... +// continue; +throw new IllegalStateException("The random file name chosen using UUID already exists. Very worrying! 
" + blobFile.getAbsolutePath()); + } + + return blobPath; +} catch (Exception ex) { + throw new BlobException(ex); +} + } + + @Override + public InputStream pullStream(String blobPath) throws BlobException { +try { + File blobFile = new File(getBlobAbsolutePath(blobPath)); + return new FileInputStream(blobFile); +} catch (Exception ex) { + throw new BlobException(ex); +} + } + + @Override + public void pushCoreMetadata(String sharedStoreName, String blobCoreMetadataName, BlobCoreMetadata bcm) throws BlobException { +try { + createCoreStorage(sharedStoreName); + ToFromJson converter = new ToFromJson<>(); + String json = converter.toJson(bcm); + + // Constant path under which the core metadata is stored in the Blob store (the only blob stored under a constant path!) + String blobMetadataPath = getBlobAbsolutePath(getBlobMetadataName(sharedStoreName, blobCoreMetadataName)); + final File blobMetadataFile = new File(blobMetadataPath); + + // Writing to the file assumed atomic, the file cannot be observed midway. Might not hold here but should be the case + // with a real S3 implementation. + try (PrintWriter out = new PrintWriter(blobMetadataFile)){ +out.println(json); + } +} catch (Exception ex) { + throw new BlobException(ex); +} + } + + @Override + public BlobCoreMetadata pullCoreMetadata(String sharedStoreName, String blobCoreMetadataName) throws BlobException { +try { + if (!coreMetadataExists(sharedStoreName, blobCoreMetadataName)) { +return null; + } + + String blobMetadataPath = getBlobAbsolutePath(getBlobMetadataName(sharedStoreName, blobCoreMetadataName)); + File blobMetadataFile = new File(blobMetadataPath); + + String json = new
[GitHub] [lucene-solr] eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud
eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud URL: https://github.com/apache/lucene-solr/pull/864#discussion_r324027378 ## File path: solr/core/src/java/org/apache/solr/store/blob/client/BlobCoreMetadataBuilder.java ## @@ -0,0 +1,95 @@ +package org.apache.solr.store.blob.client; + +import java.util.*; Review comment: We usually avoid wildcard imports in Apache projects, iirc. This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[GitHub] [lucene-solr] eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud
eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud URL: https://github.com/apache/lucene-solr/pull/864#discussion_r323982385 ## File path: solr/core/src/java/org/apache/solr/store/blob/process/CorePullTask.java ## @@ -0,0 +1,452 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.solr.store.blob.process; + +import java.io.File; +import java.lang.invoke.MethodHandles; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import org.apache.solr.cloud.ZkController; +import org.apache.solr.common.cloud.DocCollection; +import org.apache.solr.common.cloud.Replica; +import org.apache.solr.core.CoreContainer; +import org.apache.solr.core.CoreDescriptor; +import org.apache.solr.core.SolrCore; +import org.apache.solr.store.blob.client.BlobCoreMetadata; +import org.apache.solr.store.blob.client.CoreStorageClient; +import org.apache.solr.store.blob.metadata.CorePushPull; +import org.apache.solr.store.blob.metadata.ServerSideMetadata; +import org.apache.solr.store.blob.metadata.SharedStoreResolutionUtil; +import org.apache.solr.store.blob.metadata.SharedStoreResolutionUtil.SharedMetadataResolutionResult; +import org.apache.solr.store.blob.process.CorePullerFeeder.PullCoreInfo; +import org.apache.solr.store.blob.provider.BlobStorageProvider; +import org.apache.solr.store.blob.util.BlobStoreUtils; +import org.apache.solr.store.blob.util.DeduplicatingList; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Throwables; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; + +/** + * Code for pulling updates on a specific core to the Blob store. see {@CorePushTask} for the push version of this. + */ +public class CorePullTask implements DeduplicatingList.Deduplicatable { + private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); + + /** + * Minimum delay between to pull retries for a given core. 
Setting this higher than the push retry to reduce noise + * we get from a flood of queries for a stale core + * + * TODO: make configurable + */ + private static final long MIN_RETRY_DELAY_MS = 2; + + /** Cores currently being pulled and timestamp of pull start (to identify stuck ones in logs) */ + private static final HashMap pullsInFlight = Maps.newHashMap(); + + /** Cores unknown locally that got created as part of the pull process but for which no data has been pulled yet + * from Blob store. If we ignore this transitory state, these cores can be accessed locally and simply look empty. + * We'd rather treat threads attempting to access such cores like threads attempting to access an unknown core and + * do a pull (or more likely wait for an ongoing pull to finish). + * + * When this lock has to be taken as well as {@link #pullsInFlight}, then {@link #pullsInFlight} has to be taken first. + * Reading this set implies acquiring the monitor of the set (as if @GuardedBy("itself")), but writing to the set + * additionally implies holding the {@link #pullsInFlight}. This guarantees that while {@link #pullsInFlight} + * is held, no element in the set is changing. 
+ */ + private static final Set coresCreatedNotPulledYet = Sets.newHashSet(); + + private final CoreContainer coreContainer; + private final PullCoreInfo pullCoreInfo; + private final long queuedTimeMs; + private int attempts; + private long lastAttemptTimestamp; + private final PullCoreCallback callback; + + CorePullTask(CoreContainer coreContainer, PullCoreInfo pullCoreInfo, PullCoreCallback callback) { +this(coreContainer, pullCoreInfo, System.currentTimeMillis(), 0, 0L, callback); + } + + private CorePullTask(CoreContainer coreContainer, PullCoreInfo pullCoreInfo, long queuedTimeMs, int attempts, + long lastAttemptTimestamp, PullCoreCallback callback) { +this.coreContainer = coreContainer; +this.pullCoreInfo = pullCoreInfo; +this.queuedTimeMs = queuedTimeMs; +this.attempts = attempts; +this.lastAttemptTimestamp = lastAttemptTimestamp; +this.callback = callback; + } + +
[GitHub] [lucene-solr] eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud
eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud URL: https://github.com/apache/lucene-solr/pull/864#discussion_r323982385 ## File path: solr/core/src/java/org/apache/solr/store/blob/process/CorePullTask.java ## @@ -0,0 +1,452 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.solr.store.blob.process; + +import java.io.File; +import java.lang.invoke.MethodHandles; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import org.apache.solr.cloud.ZkController; +import org.apache.solr.common.cloud.DocCollection; +import org.apache.solr.common.cloud.Replica; +import org.apache.solr.core.CoreContainer; +import org.apache.solr.core.CoreDescriptor; +import org.apache.solr.core.SolrCore; +import org.apache.solr.store.blob.client.BlobCoreMetadata; +import org.apache.solr.store.blob.client.CoreStorageClient; +import org.apache.solr.store.blob.metadata.CorePushPull; +import org.apache.solr.store.blob.metadata.ServerSideMetadata; +import org.apache.solr.store.blob.metadata.SharedStoreResolutionUtil; +import org.apache.solr.store.blob.metadata.SharedStoreResolutionUtil.SharedMetadataResolutionResult; +import org.apache.solr.store.blob.process.CorePullerFeeder.PullCoreInfo; +import org.apache.solr.store.blob.provider.BlobStorageProvider; +import org.apache.solr.store.blob.util.BlobStoreUtils; +import org.apache.solr.store.blob.util.DeduplicatingList; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Throwables; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; + +/** + * Code for pulling updates on a specific core to the Blob store. see {@CorePushTask} for the push version of this. + */ +public class CorePullTask implements DeduplicatingList.Deduplicatable { + private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); + + /** + * Minimum delay between to pull retries for a given core. 
Setting this higher than the push retry to reduce noise + * we get from a flood of queries for a stale core + * + * TODO: make configurable + */ + private static final long MIN_RETRY_DELAY_MS = 2; + + /** Cores currently being pulled and timestamp of pull start (to identify stuck ones in logs) */ + private static final HashMap pullsInFlight = Maps.newHashMap(); + + /** Cores unknown locally that got created as part of the pull process but for which no data has been pulled yet + * from Blob store. If we ignore this transitory state, these cores can be accessed locally and simply look empty. + * We'd rather treat threads attempting to access such cores like threads attempting to access an unknown core and + * do a pull (or more likely wait for an ongoing pull to finish). + * + * When this lock has to be taken as well as {@link #pullsInFlight}, then {@link #pullsInFlight} has to be taken first. + * Reading this set implies acquiring the monitor of the set (as if @GuardedBy("itself")), but writing to the set + * additionally implies holding the {@link #pullsInFlight}. This guarantees that while {@link #pullsInFlight} + * is held, no element in the set is changing. 
+ */ + private static final Set coresCreatedNotPulledYet = Sets.newHashSet(); + + private final CoreContainer coreContainer; + private final PullCoreInfo pullCoreInfo; + private final long queuedTimeMs; + private int attempts; + private long lastAttemptTimestamp; + private final PullCoreCallback callback; + + CorePullTask(CoreContainer coreContainer, PullCoreInfo pullCoreInfo, PullCoreCallback callback) { +this(coreContainer, pullCoreInfo, System.currentTimeMillis(), 0, 0L, callback); + } + + private CorePullTask(CoreContainer coreContainer, PullCoreInfo pullCoreInfo, long queuedTimeMs, int attempts, + long lastAttemptTimestamp, PullCoreCallback callback) { +this.coreContainer = coreContainer; +this.pullCoreInfo = pullCoreInfo; +this.queuedTimeMs = queuedTimeMs; +this.attempts = attempts; +this.lastAttemptTimestamp = lastAttemptTimestamp; +this.callback = callback; + } + +
[GitHub] [lucene-solr] eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud
eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud URL: https://github.com/apache/lucene-solr/pull/864#discussion_r323967328 ## File path: solr/core/src/java/org/apache/solr/handler/admin/RequestApplyUpdatesOp.java ## @@ -68,4 +74,20 @@ public void execute(CoreAdminHandler.CallInfo it) throws Exception { if (it.req != null) it.req.close(); } } + + + private void pushToSharedStore(SolrCore core) { +// Push the index to blob storage before we set our state to ACTIVE +CloudDescriptor cloudDesc = core.getCoreDescriptor().getCloudDescriptor(); +if (cloudDesc.getReplicaType().equals(Replica.Type.SHARED)) { Review comment: `Replica.Type.SHARED` is an enum so this line could be as below, right? ```suggestion if (cloudDesc.getReplicaType() == Replica.Type.SHARED) { ``` This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[GitHub] [lucene-solr] eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud
eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud URL: https://github.com/apache/lucene-solr/pull/864#discussion_r323982385 ## File path: solr/core/src/java/org/apache/solr/store/blob/process/CorePullTask.java ## @@ -0,0 +1,452 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.solr.store.blob.process; + +import java.io.File; +import java.lang.invoke.MethodHandles; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import org.apache.solr.cloud.ZkController; +import org.apache.solr.common.cloud.DocCollection; +import org.apache.solr.common.cloud.Replica; +import org.apache.solr.core.CoreContainer; +import org.apache.solr.core.CoreDescriptor; +import org.apache.solr.core.SolrCore; +import org.apache.solr.store.blob.client.BlobCoreMetadata; +import org.apache.solr.store.blob.client.CoreStorageClient; +import org.apache.solr.store.blob.metadata.CorePushPull; +import org.apache.solr.store.blob.metadata.ServerSideMetadata; +import org.apache.solr.store.blob.metadata.SharedStoreResolutionUtil; +import org.apache.solr.store.blob.metadata.SharedStoreResolutionUtil.SharedMetadataResolutionResult; +import org.apache.solr.store.blob.process.CorePullerFeeder.PullCoreInfo; +import org.apache.solr.store.blob.provider.BlobStorageProvider; +import org.apache.solr.store.blob.util.BlobStoreUtils; +import org.apache.solr.store.blob.util.DeduplicatingList; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Throwables; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; + +/** + * Code for pulling updates on a specific core to the Blob store. see {@CorePushTask} for the push version of this. + */ +public class CorePullTask implements DeduplicatingList.Deduplicatable { + private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); + + /** + * Minimum delay between to pull retries for a given core. 
Setting this higher than the push retry to reduce noise + * we get from a flood of queries for a stale core + * + * TODO: make configurable + */ + private static final long MIN_RETRY_DELAY_MS = 2; + + /** Cores currently being pulled and timestamp of pull start (to identify stuck ones in logs) */ + private static final HashMap pullsInFlight = Maps.newHashMap(); + + /** Cores unknown locally that got created as part of the pull process but for which no data has been pulled yet + * from Blob store. If we ignore this transitory state, these cores can be accessed locally and simply look empty. + * We'd rather treat threads attempting to access such cores like threads attempting to access an unknown core and + * do a pull (or more likely wait for an ongoing pull to finish). + * + * When this lock has to be taken as well as {@link #pullsInFlight}, then {@link #pullsInFlight} has to be taken first. + * Reading this set implies acquiring the monitor of the set (as if @GuardedBy("itself")), but writing to the set + * additionally implies holding the {@link #pullsInFlight}. This guarantees that while {@link #pullsInFlight} + * is held, no element in the set is changing. 
+ */ + private static final Set coresCreatedNotPulledYet = Sets.newHashSet(); + + private final CoreContainer coreContainer; + private final PullCoreInfo pullCoreInfo; + private final long queuedTimeMs; + private int attempts; + private long lastAttemptTimestamp; + private final PullCoreCallback callback; + + CorePullTask(CoreContainer coreContainer, PullCoreInfo pullCoreInfo, PullCoreCallback callback) { +this(coreContainer, pullCoreInfo, System.currentTimeMillis(), 0, 0L, callback); + } + + private CorePullTask(CoreContainer coreContainer, PullCoreInfo pullCoreInfo, long queuedTimeMs, int attempts, + long lastAttemptTimestamp, PullCoreCallback callback) { +this.coreContainer = coreContainer; +this.pullCoreInfo = pullCoreInfo; +this.queuedTimeMs = queuedTimeMs; +this.attempts = attempts; +this.lastAttemptTimestamp = lastAttemptTimestamp; +this.callback = callback; + } + +
[GitHub] [lucene-solr] eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud
eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud URL: https://github.com/apache/lucene-solr/pull/864#discussion_r324026158 ## File path: solr/core/src/java/org/apache/solr/store/blob/client/BlobCoreMetadata.java ## @@ -0,0 +1,284 @@ +package org.apache.solr.store.blob.client; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; +import java.util.UUID; + +/** + * Object defining metadata stored in blob store for a Shared Collection shard and its builders. + * This metadata includes all actual segment files as well as the segments_N file of the commit point. + * + * This object is serialized to/from Json and stored in the blob store as a blob. + */ +public class BlobCoreMetadata { + +/** + * Name of the shard index data that is shared by all replicas belonging to that shard. This + * name is to decouple the core name that Solr manages from the name of the core on blob store. + */ +private final String sharedBlobName; + +/** + * Unique identifier of this metadata, that changes on every update to the metadata (except generating a new corrupt metadata + * through {@link #getCorruptOf}). + */ +private final String uniqueIdentifier; + +/** + * Indicates that a Solr (search) server pulled this core and was then unable to open or use it. This flag is used as + * an indication to servers pushing blobs for that core into Blob Store to push a complete set of files if they have + * a locally working copy rather than just diffs (files missing on Blob Store). + */ +private final boolean isCorrupt; + +/** + * Indicates that this core has been deleted by the client. This flag is used as a marker to prevent other servers + * from pushing their version of this core to blob and to allow local copy cleanup. + */ +private final boolean isDeleted; + +/** + * The array of files that constitute the current commit point of the core (as known by the Blob store). + * This array is not ordered! 
There are no duplicate entries in it either (see how it's built in {@link BlobCoreMetadataBuilder}). + */ +private final BlobFile[] blobFiles; + +/** + * Files marked for delete but not yet removed from the Blob store. Each such file contains information indicating when + * it was marked for delete so we can actually remove the corresponding blob (and the entry from this array in the metadata) + * when it's safe to do so even if there are (unexpected) conflicting updates to the blob store by multiple solr servers... + * TODO: we might want to separate the metadata blob with the deletes as it's not required to always fetch the delete list when checking freshness of local core... + */ +private final BlobFileToDelete[] blobFilesToDelete; + +/** + * This is the constructor called by {@link BlobCoreMetadataBuilder}. + * It always builds non "isCorrupt" and non "isDeleted" metadata. + * The only way to build an instance of "isCorrupt" metadata is to use {@link #getCorruptOf} and for "isDeleted" use {@link #getDeletedOf()} + */ +BlobCoreMetadata(String sharedBlobName, BlobFile[] blobFiles, BlobFileToDelete[] blobFilesToDelete) { +this(sharedBlobName, blobFiles, blobFilesToDelete, UUID.randomUUID().toString(), false, +false); +} + +private BlobCoreMetadata(String sharedBlobName, BlobFile[] blobFiles, BlobFileToDelete[] blobFilesToDelete, +String uniqueIdentifier, boolean isCorrupt, boolean isDeleted) { +this.sharedBlobName = sharedBlobName; +this.blobFiles = blobFiles; +this.blobFilesToDelete = blobFilesToDelete; +this.uniqueIdentifier = uniqueIdentifier; +this.isCorrupt = isCorrupt; +this.isDeleted = isDeleted; +} + +/** + * Given a non corrupt {@link BlobCoreMetadata} instance, creates an equivalent one based on it but marked as corrupt. + * The new instance keeps all the rest of the metadata unchanged, including the {@link #uniqueIdentifier}. 
+ */ +public BlobCoreMetadata getCorruptOf() { +assert !isCorrupt; +return new BlobCoreMetadata(sharedBlobName, blobFiles, blobFilesToDelete, uniqueIdentifier, true, isDeleted); +} + +/** + * Given a {@link BlobCoreMetadata} instance, creates an equivalent one based on it but marked as deleted. + * + * The new instance keeps all the rest of the metadata unchanged, including the {@link #uniqueIdentifier}. + */ +public BlobCoreMetadata getDeletedOf() { +assert !isDeleted; +return new BlobCoreMetadata(sharedBlobName, blobFiles, blobFilesToDelete, uniqueIdentifier, isCorrupt, true); +} + +/** + * Returns true if the Blob metadata was marked as deleted + */ +public boolean getIsDeleted() { +return isDeleted; +} + +/** +
[GitHub] [lucene-solr] eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud
eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud URL: https://github.com/apache/lucene-solr/pull/864#discussion_r323984413 ## File path: solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java ## @@ -31,6 +31,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import java.util.Locale; Review comment: is this being used? This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[GitHub] [lucene-solr] eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud
eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud URL: https://github.com/apache/lucene-solr/pull/864#discussion_r323981156 ## File path: solr/core/src/java/org/apache/solr/store/blob/process/CorePullTask.java ## @@ -0,0 +1,452 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.solr.store.blob.process; + +import java.io.File; +import java.lang.invoke.MethodHandles; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import org.apache.solr.cloud.ZkController; +import org.apache.solr.common.cloud.DocCollection; +import org.apache.solr.common.cloud.Replica; +import org.apache.solr.core.CoreContainer; +import org.apache.solr.core.CoreDescriptor; +import org.apache.solr.core.SolrCore; +import org.apache.solr.store.blob.client.BlobCoreMetadata; +import org.apache.solr.store.blob.client.CoreStorageClient; +import org.apache.solr.store.blob.metadata.CorePushPull; +import org.apache.solr.store.blob.metadata.ServerSideMetadata; +import org.apache.solr.store.blob.metadata.SharedStoreResolutionUtil; +import org.apache.solr.store.blob.metadata.SharedStoreResolutionUtil.SharedMetadataResolutionResult; +import org.apache.solr.store.blob.process.CorePullerFeeder.PullCoreInfo; +import org.apache.solr.store.blob.provider.BlobStorageProvider; +import org.apache.solr.store.blob.util.BlobStoreUtils; +import org.apache.solr.store.blob.util.DeduplicatingList; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Throwables; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; + +/** + * Code for pulling updates on a specific core from the Blob store. see {@link CorePushTask} for the push version of this. + */ +public class CorePullTask implements DeduplicatingList.Deduplicatable { + private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); + + /** + * Minimum delay between two pull retries for a given core. 
Setting this higher than the push retry to reduce noise + * we get from a flood of queries for a stale core + * + * TODO: make configurable + */ + private static final long MIN_RETRY_DELAY_MS = 2; + + /** Cores currently being pulled and timestamp of pull start (to identify stuck ones in logs) */ + private static final HashMap pullsInFlight = Maps.newHashMap(); + + /** Cores unknown locally that got created as part of the pull process but for which no data has been pulled yet + * from Blob store. If we ignore this transitory state, these cores can be accessed locally and simply look empty. + * We'd rather treat threads attempting to access such cores like threads attempting to access an unknown core and + * do a pull (or more likely wait for an ongoing pull to finish). + * + * When this lock has to be taken as well as {@link #pullsInFlight}, then {@link #pullsInFlight} has to be taken first. + * Reading this set implies acquiring the monitor of the set (as if @GuardedBy("itself")), but writing to the set + * additionally implies holding the {@link #pullsInFlight}. This guarantees that while {@link #pullsInFlight} + * is held, no element in the set is changing. 
+ */ + private static final Set coresCreatedNotPulledYet = Sets.newHashSet(); + + private final CoreContainer coreContainer; + private final PullCoreInfo pullCoreInfo; + private final long queuedTimeMs; + private int attempts; + private long lastAttemptTimestamp; + private final PullCoreCallback callback; + + CorePullTask(CoreContainer coreContainer, PullCoreInfo pullCoreInfo, PullCoreCallback callback) { +this(coreContainer, pullCoreInfo, System.currentTimeMillis(), 0, 0L, callback); + } + + private CorePullTask(CoreContainer coreContainer, PullCoreInfo pullCoreInfo, long queuedTimeMs, int attempts, + long lastAttemptTimestamp, PullCoreCallback callback) { +this.coreContainer = coreContainer; +this.pullCoreInfo = pullCoreInfo; +this.queuedTimeMs = queuedTimeMs; +this.attempts = attempts; +this.lastAttemptTimestamp = lastAttemptTimestamp; +this.callback = callback; + } + +
[GitHub] [lucene-solr] eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud
eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud URL: https://github.com/apache/lucene-solr/pull/864#discussion_r323974231 ## File path: solr/core/src/java/org/apache/solr/store/blob/provider/BlobStorageProvider.java ## @@ -0,0 +1,62 @@ +package org.apache.solr.store.blob.provider; + +import java.io.IOException; +import java.lang.invoke.MethodHandles; + +import org.apache.solr.common.SolrException; +import org.apache.solr.store.blob.client.BlobException; +import org.apache.solr.store.blob.client.BlobStorageClientBuilder; +import org.apache.solr.store.blob.client.BlobstoreProviderType; +import org.apache.solr.store.blob.client.CoreStorageClient; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.amazonaws.SdkClientException; + +/** + * Class that provides access to the shared storage client (blob client) and + * handles initiation of such client. This class serves as the provider for all + * blob store communication channels. + */ +public class BlobStorageProvider { + + private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); + + private CoreStorageClient storageClient; Review comment: ```suggestion private volatile CoreStorageClient storageClient; ``` This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[GitHub] [lucene-solr] eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud
eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud URL: https://github.com/apache/lucene-solr/pull/864#discussion_r323972376 ## File path: solr/core/src/java/org/apache/solr/store/blob/client/S3StorageClient.java ## @@ -0,0 +1,385 @@ +package org.apache.solr.store.blob.client; + +import java.io.IOException; +import java.io.InputStream; +import java.util.*; +import java.util.function.Consumer; +import java.util.stream.Collectors; + +import org.apache.solr.common.StringUtils; +import org.apache.solr.util.FileUtils; + +import com.amazonaws.AmazonClientException; +import com.amazonaws.AmazonServiceException; +import com.amazonaws.regions.Regions; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3ClientBuilder; +import com.amazonaws.services.s3.model.*; +import com.amazonaws.services.s3.model.DeleteObjectsRequest.KeyVersion; +import com.google.common.collect.Iterables; + +import org.apache.solr.store.blob.client.BlobCoreMetadata; +import org.apache.solr.store.blob.client.BlobClientUtils; +import org.apache.solr.store.blob.client.ToFromJson; + +/** + * This class implements an AmazonS3 client for reading and writing search index + * data to AWS S3. + */ +public class S3StorageClient implements CoreStorageClient { + + private final AmazonS3 s3Client; + + /** The S3 bucket where we write all of our blobs to */ + private final String blobBucketName; + + // S3 has a hard limit of 1000 keys per batch delete request + private static final int MAX_KEYS_PER_BATCH_DELETE = 1000; + + /** + * Construct a new S3StorageClient that is an implementation of the + * CoreStorageClient using AWS S3 as the underlying blob store service provider. 
+ */ + public S3StorageClient() throws IOException { +String credentialsFilePath = AmazonS3Configs.CREDENTIALS_FILE_PATH.getValue(); + +// requires credentials file on disk to authenticate with S3 +if (!FileUtils.fileExists(credentialsFilePath)) { + throw new IOException("Credentials file does not exist in " + credentialsFilePath); +} + +/* + * default s3 client builder loads credentials from disk and handles token refreshes + */ +AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard(); +s3Client = builder +.withPathStyleAccessEnabled(true) +.withRegion(Regions.fromName(AmazonS3Configs.REGION.getValue())) +.build(); + +blobBucketName = AmazonS3Configs.BUCKET_NAME.getValue(); + } + + @Override + public void pushCoreMetadata(String sharedStoreName, String blobCoreMetadataName, BlobCoreMetadata bcm) + throws BlobException { +try { + ToFromJson converter = new ToFromJson<>(); + String json = converter.toJson(bcm); + + String blobCoreMetadataPath = getBlobMetadataPath(sharedStoreName, blobCoreMetadataName); + /* + * Encodes contents of the string into an S3 object. 
If no exception is thrown + * then the object is guaranteed to have been stored + */ + s3Client.putObject(blobBucketName, blobCoreMetadataPath, json); +} catch (AmazonServiceException ase) { + throw handleAmazonServiceException(ase); +} catch (AmazonClientException ace) { + throw new BlobClientException(ace); +} catch (Exception ex) { + throw new BlobException(ex); +} + } + + @Override + public BlobCoreMetadata pullCoreMetadata(String sharedStoreName, String blobCoreMetadataName) throws BlobException { +try { + String blobCoreMetadataPath = getBlobMetadataPath(sharedStoreName, blobCoreMetadataName); + + if (!coreMetadataExists(sharedStoreName, blobCoreMetadataName)) { +return null; + } + + String decodedJson = s3Client.getObjectAsString(blobBucketName, blobCoreMetadataPath); + ToFromJson converter = new ToFromJson<>(); + return converter.fromJson(decodedJson, BlobCoreMetadata.class); +} catch (AmazonServiceException ase) { + throw handleAmazonServiceException(ase); +} catch (AmazonClientException ace) { + throw new BlobClientException(ace); +} catch (Exception ex) { + throw new BlobException(ex); +} + } + + @Override + public InputStream pullStream(String path) throws BlobException { +try { + S3Object requestedObject = s3Client.getObject(blobBucketName, path); + // This InputStream instance needs to be closed by the caller + return requestedObject.getObjectContent(); +} catch (AmazonServiceException ase) { + throw handleAmazonServiceException(ase); +} catch (AmazonClientException ace) { + throw new BlobClientException(ace); +} catch (Exception ex) { + throw new BlobException(ex); +} + } + + @Override + public String pushStream(String blobName, InputStream is, long contentLength, String fileNamePrefix) + throws BlobException { +try { + /* + * This object metadata is associated per
[GitHub] [lucene-solr] eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud
eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud URL: https://github.com/apache/lucene-solr/pull/864#discussion_r323974906 ## File path: solr/core/src/java/org/apache/solr/store/blob/provider/BlobStorageProvider.java ## @@ -0,0 +1,62 @@ +package org.apache.solr.store.blob.provider; + +import java.io.IOException; +import java.lang.invoke.MethodHandles; + +import org.apache.solr.common.SolrException; +import org.apache.solr.store.blob.client.BlobException; +import org.apache.solr.store.blob.client.BlobStorageClientBuilder; +import org.apache.solr.store.blob.client.BlobstoreProviderType; +import org.apache.solr.store.blob.client.CoreStorageClient; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.amazonaws.SdkClientException; + +/** + * Class that provides access to the shared storage client (blob client) and + * handles initiation of such client. This class serves as the provider for all + * blob store communication channels. + */ +public class BlobStorageProvider { + + private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); + + private CoreStorageClient storageClient; + + public CoreStorageClient getClient() { +if (storageClient != null) { + return storageClient; +} + +return getClient(BlobstoreProviderType.getConfiguredProvider()); + } + + private synchronized CoreStorageClient getClient(BlobstoreProviderType blobStorageProviderType) { +if (storageClient != null) { Review comment: Lines 37-39 duplicate lines 29-31. Maybe remove the redundant lines in the `getClient()` method? This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. 
For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[GitHub] [lucene-solr] eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud
eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud URL: https://github.com/apache/lucene-solr/pull/864#discussion_r323973484 ## File path: solr/core/src/java/org/apache/solr/store/blob/process/CorePullTracker.java ## @@ -0,0 +1,208 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.solr.store.blob.process; + +import java.io.IOException; +import java.lang.invoke.MethodHandles; +import java.util.Map; + +import javax.servlet.http.HttpServletRequest; + +import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData; +import org.apache.solr.common.SolrException; +import org.apache.solr.common.cloud.DocCollection; +import org.apache.solr.common.cloud.Slice; +import org.apache.solr.common.cloud.ZkStateReader; +import org.apache.solr.common.util.Utils; +import org.apache.solr.core.CoreContainer; +import org.apache.solr.core.SolrCore; +import org.apache.solr.servlet.SolrRequestParsers; +import org.apache.solr.store.blob.metadata.PushPullData; +import org.apache.solr.store.blob.process.CorePullerFeeder.PullCoreInfo; +import org.apache.solr.store.blob.util.BlobStoreUtils; +import org.apache.solr.store.blob.util.DeduplicatingList; +import org.apache.solr.store.shared.metadata.SharedShardMetadataController; + +import com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Tracks cores that are being queried and if necessary enqueues them for pull from blob store + */ +public class CorePullTracker { + private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); + + static private final int TRACKING_LIST_MAX_SIZE = 50; + + private final DeduplicatingList coresToPull; + + /* Config value that enables core pulls */ + @VisibleForTesting + public static boolean isBackgroundPullEnabled = true; // TODO : make configurable + + // Let's define these paths in yet another place in the code... 
+ private static final String QUERY_PATH_PREFIX = "/select"; + private static final String SPELLCHECK_PATH_PREFIX = "/spellcheck"; + private static final String RESULTPROMOTION_PATH_PREFIX = "/result_promotion"; + private static final String INDEXLOOKUP_PATH_PREFIX = "/indexLookup"; + private static final String HIGHLIGHT_PATH_PREFIX = "/highlight"; + private static final String BACKUP_PATH_PREFIX = "/backup"; + + public CorePullTracker() { +coresToPull = new DeduplicatingList<>(TRACKING_LIST_MAX_SIZE, new CorePullerFeeder.PullCoreInfoMerger()); + } + + /** + * If the local core is stale, enqueues it to be pulled in from blob + * TODO: add stricter checks so that we don't pull on every request + */ + public void enqueueForPullIfNecessary(String requestPath, SolrCore core, String collectionName, + CoreContainer cores) throws IOException, SolrException { +// Initialize variables +String coreName = core.getName(); +String shardName = core.getCoreDescriptor().getCloudDescriptor().getShardId(); +SharedShardMetadataController sharedShardMetadataController = cores.getSharedStoreManager().getSharedShardMetadataController(); +DocCollection collection = cores.getZkController().getClusterState().getCollection(collectionName); + +Slice shard = collection.getSlicesMap().get(shardName); +if (shard != null) { + try { +if (!collection.getActiveSlices().contains(shard)) { + // unclear if there are side effects but logging for now + log.warn("Enqueueing a pull for shard " + shardName + " that is inactive!"); +} +log.info("Enqueue a pull for collection=" + collectionName + " shard=" + shardName + " coreName=" + coreName); +// creates the metadata node if it doesn't exist +sharedShardMetadataController.ensureMetadataNodeExists(collectionName, shardName); + +/* + * Get the metadataSuffix value from ZooKeeper or from a cache if an entry exists for the + * given collection and shardName. 
If the leader has already changed, the conditional update + * later will fail and invalidate the cache entry if it exists. + */ +VersionedD
[GitHub] [lucene-solr] eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud
eribeiro commented on a change in pull request #864: SOLR-13101 : Shared storage support in SolrCloud URL: https://github.com/apache/lucene-solr/pull/864#discussion_r324023752 ## File path: solr/core/src/java/org/apache/solr/store/shared/SharedStoreManager.java ## @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.store.shared; + +import org.apache.solr.cloud.ZkController; +import org.apache.solr.store.blob.process.BlobDeleteManager; +import org.apache.solr.store.blob.process.BlobProcessUtil; +import org.apache.solr.store.blob.process.CorePullTracker; +import org.apache.solr.store.blob.provider.BlobStorageProvider; +import org.apache.solr.store.shared.metadata.SharedShardMetadataController; + +import com.google.common.annotations.VisibleForTesting; + +/** + * Provides access to Shared Store processes. 
Note that this class is meant to be + * more generic in the future and provide a cleaner API but for now we'll expose + * the underlying implementations + */ +public class SharedStoreManager { + + private ZkController zkController; + private SharedShardMetadataController sharedShardMetadataController; + private BlobStorageProvider blobStorageProvider; + private BlobDeleteManager blobDeleteManager; + private BlobProcessUtil blobProcessUtil; + private CorePullTracker corePullTracker; + + public SharedStoreManager(ZkController controller) { +zkController = controller; +// initialize BlobProcessUtil with the SharedStoreManager for background processes to be ready +blobProcessUtil = new BlobProcessUtil(zkController.getCoreContainer()); + } + + @VisibleForTesting + public void initBlobStorageProvider(BlobStorageProvider blobStorageProvider) { +this.blobStorageProvider = blobStorageProvider; + } + + /* + * Initiates a SharedShardMetadataController if it doesn't exist and returns one + */ + public SharedShardMetadataController getSharedShardMetadataController() { +if (sharedShardMetadataController != null) { Review comment: If this method (and the ones below) can be called by multiple threads then it can possibly hit a situation where two or more threads arrive at this line at the same time and `sharedShardMetadataController` is null and will create one or more objects. Does it make sense? Would it be the case of synchronizing the method? This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-NightlyTests-master - Build # 1959 - Still Failing
Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-master/1959/ No tests ran. Build Log: [...truncated 25 lines...] ERROR: Failed to check out http://svn.apache.org/repos/asf/lucene/test-data org.tmatesoft.svn.core.SVNException: svn: E175002: connection refused by the server svn: E175002: OPTIONS request failed on '/repos/asf/lucene/test-data' at org.tmatesoft.svn.core.internal.wc.SVNErrorManager.error(SVNErrorManager.java:112) at org.tmatesoft.svn.core.internal.wc.SVNErrorManager.error(SVNErrorManager.java:96) at org.tmatesoft.svn.core.internal.io.dav.http.HTTPConnection.request(HTTPConnection.java:765) at org.tmatesoft.svn.core.internal.io.dav.http.HTTPConnection.request(HTTPConnection.java:352) at org.tmatesoft.svn.core.internal.io.dav.http.HTTPConnection.request(HTTPConnection.java:340) at org.tmatesoft.svn.core.internal.io.dav.DAVConnection.performHttpRequest(DAVConnection.java:910) at org.tmatesoft.svn.core.internal.io.dav.DAVConnection.exchangeCapabilities(DAVConnection.java:702) at org.tmatesoft.svn.core.internal.io.dav.DAVConnection.open(DAVConnection.java:113) at org.tmatesoft.svn.core.internal.io.dav.DAVRepository.openConnection(DAVRepository.java:1035) at org.tmatesoft.svn.core.internal.io.dav.DAVRepository.getLatestRevision(DAVRepository.java:164) at org.tmatesoft.svn.core.internal.wc2.ng.SvnNgRepositoryAccess.getRevisionNumber(SvnNgRepositoryAccess.java:119) at org.tmatesoft.svn.core.internal.wc2.SvnRepositoryAccess.getLocations(SvnRepositoryAccess.java:178) at org.tmatesoft.svn.core.internal.wc2.ng.SvnNgRepositoryAccess.createRepositoryFor(SvnNgRepositoryAccess.java:43) at org.tmatesoft.svn.core.internal.wc2.ng.SvnNgAbstractUpdate.checkout(SvnNgAbstractUpdate.java:831) at org.tmatesoft.svn.core.internal.wc2.ng.SvnNgCheckout.run(SvnNgCheckout.java:26) at org.tmatesoft.svn.core.internal.wc2.ng.SvnNgCheckout.run(SvnNgCheckout.java:11) at org.tmatesoft.svn.core.internal.wc2.ng.SvnNgOperationRunner.run(SvnNgOperationRunner.java:20) at 
org.tmatesoft.svn.core.internal.wc2.SvnOperationRunner.run(SvnOperationRunner.java:21) at org.tmatesoft.svn.core.wc2.SvnOperationFactory.run(SvnOperationFactory.java:1239) at org.tmatesoft.svn.core.wc2.SvnOperation.run(SvnOperation.java:294) at hudson.scm.subversion.CheckoutUpdater$SubversionUpdateTask.perform(CheckoutUpdater.java:133) at hudson.scm.subversion.WorkspaceUpdater$UpdateTask.delegateTo(WorkspaceUpdater.java:168) at hudson.scm.subversion.WorkspaceUpdater$UpdateTask.delegateTo(WorkspaceUpdater.java:176) at hudson.scm.subversion.UpdateUpdater$TaskImpl.perform(UpdateUpdater.java:134) at hudson.scm.subversion.WorkspaceUpdater$UpdateTask.delegateTo(WorkspaceUpdater.java:168) at hudson.scm.SubversionSCM$CheckOutTask.perform(SubversionSCM.java:1041) at hudson.scm.SubversionSCM$CheckOutTask.invoke(SubversionSCM.java:1017) at hudson.scm.SubversionSCM$CheckOutTask.invoke(SubversionSCM.java:990) at hudson.FilePath$FileCallableWrapper.call(FilePath.java:3086) at hudson.remoting.UserRequest.perform(UserRequest.java:212) at hudson.remoting.UserRequest.perform(UserRequest.java:54) at hudson.remoting.Request$2.run(Request.java:369) at hudson.remoting.InterceptingExecutorService$1.call(InterceptingExecutorService.java:72) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) at java.lang.Thread.run(Thread.java:744) Caused by: java.net.ConnectException: Connection refused at java.net.PlainSocketImpl.socketConnect(Native Method) at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:345) at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:206) at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:188) at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392) at java.net.Socket.connect(Socket.java:589) at 
org.tmatesoft.svn.core.internal.util.SVNSocketConnection.run(SVNSocketConnection.java:57) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) ... 4 more java.net.ConnectException: Connection refused at java.net.PlainSocketImpl.socketConnect(Native Method) at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:345) at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:206) at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:188) at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392)
[jira] [Assigned] (SOLR-13125) Optimize Queries when sorting by router.field
[ https://issues.apache.org/jira/browse/SOLR-13125?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Gus Heck reassigned SOLR-13125: --- Assignee: Gus Heck > Optimize Queries when sorting by router.field > - > > Key: SOLR-13125 > URL: https://issues.apache.org/jira/browse/SOLR-13125 > Project: Solr > Issue Type: Sub-task >Reporter: mosh >Assignee: Gus Heck >Priority: Minor > Attachments: SOLR-13125-no-commit.patch, SOLR-13125.patch, > SOLR-13125.patch, SOLR-13125.patch > > Time Spent: 10m > Remaining Estimate: 0h > > We are currently testing TRA using Solr 7.7, having >300 shards in the alias, > with much growth in the coming months. > The "hot" data(in our case, more recent) will be stored on stronger > nodes(SSD, more RAM, etc). > A proposal of optimizing queries sorted by router.field(the field which TRA > uses to route the data to the correct collection) has emerged. > Perhaps, in queries which are sorted by router.field, Solr could be smart > enough to wait for the more recent collections, and in case the limit was > reached cancel other queries(or just not block and wait for the results)? > For example: > When querying a TRA which with a filter on a different field than > router.field, but sorting by router.field desc, limit=100. > Since this is a TRA, solr will issue queries for all the collections in the > alias. > But to optimize this particular type of query, Solr could wait for the most > recent collection in the TRA, see whether the result set matches or exceeds > the limit. If so, the query could be returned to the user without waiting for > the rest of the shards. If not, the issuing node will block until the second > query returns, and so forth, until the limit of the request is reached. > This might also be useful for deep paging, querying each collection and only > skipping to the next once there are no more results in the specified > collection. > Thoughts or inputs are always welcome. 
> This is just my two cents, and I'm always happy to brainstorm. > Thanks in advance. -- This message was sent by Atlassian Jira (v8.3.2#803003) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-Tests-8.x - Build # 569 - Failure
Build: https://builds.apache.org/job/Lucene-Solr-Tests-8.x/569/ No tests ran. Build Log: [...truncated 13593 lines...] ERROR: command execution failed. ERROR: Step ‘Archive the artifacts’ failed: no workspace for Lucene-Solr-Tests-8.x #569 ERROR: Step ‘Publish JUnit test result report’ failed: no workspace for Lucene-Solr-Tests-8.x #569 ERROR: lucene is offline; cannot locate JDK 1.8 (latest) ERROR: lucene is offline; cannot locate JDK 1.8 (latest) ERROR: lucene is offline; cannot locate JDK 1.8 (latest) ERROR: lucene is offline; cannot locate JDK 1.8 (latest) ERROR: lucene is offline; cannot locate JDK 1.8 (latest) Email was triggered for: Failure - Any Sending email for trigger: Failure - Any ERROR: lucene is offline; cannot locate JDK 1.8 (latest) ERROR: lucene is offline; cannot locate JDK 1.8 (latest) ERROR: lucene is offline; cannot locate JDK 1.8 (latest) ERROR: lucene is offline; cannot locate JDK 1.8 (latest) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-13094) NPE while doing regular Facet
[ https://issues.apache.org/jira/browse/SOLR-13094?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16928892#comment-16928892 ] Jay commented on SOLR-13094: This is blocking our upgrade to solr 8, we are currently on 7.7.2 but still using trie field as a alternative solution. FWIW, had the same problem in solr 6 also. [https://lucene.472066.n3.nabble.com/solr-6-6-3-intermittent-group-faceting-errors-td4385692.html#a4385865]. > NPE while doing regular Facet > - > > Key: SOLR-13094 > URL: https://issues.apache.org/jira/browse/SOLR-13094 > Project: Solr > Issue Type: Bug > Components: Facet Module >Affects Versions: 7.5.0 >Reporter: Amrit Sarkar >Priority: Major > > I am issuing a regular facet query: > {code} > params = new ModifiableSolrParams() > .add("q", query.trim()) > .add("rows", "0") > .add("facet", "true") > .add("facet.field", "description") > .add("facet.limit", "200"); > {code} > Exception: > {code} > 2018-12-24 15:50:20.843 ERROR (qtp690521419-130) [c:wiki s:shard2 > r:core_node4 x:wiki_shard2_replica_n2] o.a.s.s.HttpSolrCall > null:org.apache.solr.common.SolrException: Exception during facet.field: > description > at > org.apache.solr.request.SimpleFacets.lambda$getFacetFieldCounts$0(SimpleFacets.java:832) > at java.util.concurrent.FutureTask.run(FutureTask.java:266) > at org.apache.solr.request.SimpleFacets$3.execute(SimpleFacets.java:765) > at > org.apache.solr.request.SimpleFacets.getFacetFieldCounts(SimpleFacets.java:841) > at > org.apache.solr.handler.component.FacetComponent.getFacetCounts(FacetComponent.java:329) > at > org.apache.solr.handler.component.FacetComponent.process(FacetComponent.java:273) > at > org.apache.solr.handler.component.SearchHandler.handleRequestBody(SearchHandler.java:298) > at > org.apache.solr.handler.RequestHandlerBase.handleRequest(RequestHandlerBase.java:199) > at org.apache.solr.core.SolrCore.execute(SolrCore.java:2541) > at 
org.apache.solr.servlet.HttpSolrCall.execute(HttpSolrCall.java:709) > at org.apache.solr.servlet.HttpSolrCall.call(HttpSolrCall.java:515) > at > org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:377) > at > org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:323) > at > org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1634) > at > org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533) > at > org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:146) > at > org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:548) > at > org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132) > at > org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:257) > at > org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595) > at > org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255) > at > org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1317) > at > org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203) > at > org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473) > at > org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564) > at > org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201) > at > org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1219) > at > org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144) > at > org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219) > at > org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126) > at > org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132) > at > org.eclipse.jetty.rewrite.handler.RewriteHandler.handle(RewriteHandler.java:335) 
> at > org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132) > at org.eclipse.jetty.server.Server.handle(Server.java:531) > at org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352) > at > org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260) > at > org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281) > at org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102) > at org.eclipse.jetty.io.ChannelEn
[jira] [Commented] (SOLR-13094) NPE while doing regular Facet
[ https://issues.apache.org/jira/browse/SOLR-13094?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16928887#comment-16928887 ] Jay commented on SOLR-13094: [~sarkaramr...@gmail.com]: I am seeing similar error when running group faceting on long field also. [https://lucene.472066.n3.nabble.com/Solr-7-7-group-faceting-errors-td4429996.html]. > NPE while doing regular Facet > - > > Key: SOLR-13094 > URL: https://issues.apache.org/jira/browse/SOLR-13094 > Project: Solr > Issue Type: Bug > Components: Facet Module >Affects Versions: 7.5.0 >Reporter: Amrit Sarkar >Priority: Major > > I am issuing a regular facet query: > {code} > params = new ModifiableSolrParams() > .add("q", query.trim()) > .add("rows", "0") > .add("facet", "true") > .add("facet.field", "description") > .add("facet.limit", "200"); > {code} > Exception: > {code} > 2018-12-24 15:50:20.843 ERROR (qtp690521419-130) [c:wiki s:shard2 > r:core_node4 x:wiki_shard2_replica_n2] o.a.s.s.HttpSolrCall > null:org.apache.solr.common.SolrException: Exception during facet.field: > description > at > org.apache.solr.request.SimpleFacets.lambda$getFacetFieldCounts$0(SimpleFacets.java:832) > at java.util.concurrent.FutureTask.run(FutureTask.java:266) > at org.apache.solr.request.SimpleFacets$3.execute(SimpleFacets.java:765) > at > org.apache.solr.request.SimpleFacets.getFacetFieldCounts(SimpleFacets.java:841) > at > org.apache.solr.handler.component.FacetComponent.getFacetCounts(FacetComponent.java:329) > at > org.apache.solr.handler.component.FacetComponent.process(FacetComponent.java:273) > at > org.apache.solr.handler.component.SearchHandler.handleRequestBody(SearchHandler.java:298) > at > org.apache.solr.handler.RequestHandlerBase.handleRequest(RequestHandlerBase.java:199) > at org.apache.solr.core.SolrCore.execute(SolrCore.java:2541) > at org.apache.solr.servlet.HttpSolrCall.execute(HttpSolrCall.java:709) > at org.apache.solr.servlet.HttpSolrCall.call(HttpSolrCall.java:515) > at 
> org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:377) > at > org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:323) > at > org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1634) > at > org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533) > at > org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:146) > at > org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:548) > at > org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132) > at > org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:257) > at > org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595) > at > org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255) > at > org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1317) > at > org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203) > at > org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473) > at > org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564) > at > org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201) > at > org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1219) > at > org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144) > at > org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219) > at > org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126) > at > org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132) > at > org.eclipse.jetty.rewrite.handler.RewriteHandler.handle(RewriteHandler.java:335) > at > org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132) > at 
org.eclipse.jetty.server.Server.handle(Server.java:531) > at org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352) > at > org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260) > at > org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281) > at org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102) > at org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118) > at > org.eclipse.jetty.util.thread.strategy.Ea
[jira] [Commented] (SOLR-13714) Incorrect shardHandlerFactory config element documented in refguide for "distributed requests"
[ https://issues.apache.org/jira/browse/SOLR-13714?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16928809#comment-16928809 ] ASF subversion and git services commented on SOLR-13714: Commit 0ad31d471e89ced0d074f37f9bc584180a362135 in lucene-solr's branch refs/heads/branch_8_2 from Michael Gibney [ https://gitbox.apache.org/repos/asf?p=lucene-solr.git;h=0ad31d4 ] SOLR-13714: Correct refguide regarding shardHandlerFactory solrconfig.xml element (#843) > Incorrect shardHandlerFactory config element documented in refguide for > "distributed requests" > -- > > Key: SOLR-13714 > URL: https://issues.apache.org/jira/browse/SOLR-13714 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) > Components: documentation >Affects Versions: 7.7.2, 8.1.1 >Reporter: Michael Gibney >Priority: Trivial > Time Spent: 0.5h > Remaining Estimate: 0h > > Reference guide documentation is inconsistent with respect to configuration > of {{shardHandlerFactory}} in {{solrconfig.xml}}. > The correct config element name is "{{shardHandlerFactory}}", as reflected in > code [in > SolrXmlConfig.java|https://github.com/apache/lucene-solr/blob/301ea0e/solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java#L460] > and [in > SearchHandler.java|https://github.com/apache/lucene-solr/blob/43fc05c/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java#L97]. > The element name is documented correctly in the [refGuide page for "Format of > solr.xml"|https://lucene.apache.org/solr/guide/8_1/format-of-solr-xml.html#the-shardhandlerfactory-element], > but it is documented incorrectly (as "{{shardHandler}}", not > "{{shardHandlerFactory}}" in the [refGuide page for "Distributed > Requests"|https://lucene.apache.org/solr/guide/8_1/distributed-requests.html#configuring-the-shardhandlerfactory]. 
-- This message was sent by Atlassian Jira (v8.3.2#803003) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Resolved] (SOLR-13714) Incorrect shardHandlerFactory config element documented in refguide for "distributed requests"
[ https://issues.apache.org/jira/browse/SOLR-13714?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Cassandra Targett resolved SOLR-13714. -- Fix Version/s: 8.2 Assignee: Cassandra Targett Resolution: Fixed > Incorrect shardHandlerFactory config element documented in refguide for > "distributed requests" > -- > > Key: SOLR-13714 > URL: https://issues.apache.org/jira/browse/SOLR-13714 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) > Components: documentation >Affects Versions: 7.7.2, 8.1.1 >Reporter: Michael Gibney >Assignee: Cassandra Targett >Priority: Trivial > Fix For: 8.2 > > Time Spent: 0.5h > Remaining Estimate: 0h > > Reference guide documentation is inconsistent with respect to configuration > of {{shardHandlerFactory}} in {{solrconfig.xml}}. > The correct config element name is "{{shardHandlerFactory}}", as reflected in > code [in > SolrXmlConfig.java|https://github.com/apache/lucene-solr/blob/301ea0e/solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java#L460] > and [in > SearchHandler.java|https://github.com/apache/lucene-solr/blob/43fc05c/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java#L97]. > The element name is documented correctly in the [refGuide page for "Format of > solr.xml"|https://lucene.apache.org/solr/guide/8_1/format-of-solr-xml.html#the-shardhandlerfactory-element], > but it is documented incorrectly (as "{{shardHandler}}", not > "{{shardHandlerFactory}}" in the [refGuide page for "Distributed > Requests"|https://lucene.apache.org/solr/guide/8_1/distributed-requests.html#configuring-the-shardhandlerfactory]. -- This message was sent by Atlassian Jira (v8.3.2#803003) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-13714) Incorrect shardHandlerFactory config element documented in refguide for "distributed requests"
[ https://issues.apache.org/jira/browse/SOLR-13714?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16928808#comment-16928808 ] ASF subversion and git services commented on SOLR-13714: Commit ca25f9f57383a99c03cb784cae4e7ab7dbe17b1c in lucene-solr's branch refs/heads/branch_8x from Michael Gibney [ https://gitbox.apache.org/repos/asf?p=lucene-solr.git;h=ca25f9f ] SOLR-13714: Correct refguide regarding shardHandlerFactory solrconfig.xml element (#843) > Incorrect shardHandlerFactory config element documented in refguide for > "distributed requests" > -- > > Key: SOLR-13714 > URL: https://issues.apache.org/jira/browse/SOLR-13714 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) > Components: documentation >Affects Versions: 7.7.2, 8.1.1 >Reporter: Michael Gibney >Priority: Trivial > Time Spent: 0.5h > Remaining Estimate: 0h > > Reference guide documentation is inconsistent with respect to configuration > of {{shardHandlerFactory}} in {{solrconfig.xml}}. > The correct config element name is "{{shardHandlerFactory}}", as reflected in > code [in > SolrXmlConfig.java|https://github.com/apache/lucene-solr/blob/301ea0e/solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java#L460] > and [in > SearchHandler.java|https://github.com/apache/lucene-solr/blob/43fc05c/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java#L97]. > The element name is documented correctly in the [refGuide page for "Format of > solr.xml"|https://lucene.apache.org/solr/guide/8_1/format-of-solr-xml.html#the-shardhandlerfactory-element], > but it is documented incorrectly (as "{{shardHandler}}", not > "{{shardHandlerFactory}}" in the [refGuide page for "Distributed > Requests"|https://lucene.apache.org/solr/guide/8_1/distributed-requests.html#configuring-the-shardhandlerfactory]. 
-- This message was sent by Atlassian Jira (v8.3.2#803003) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-13714) Incorrect shardHandlerFactory config element documented in refguide for "distributed requests"
[ https://issues.apache.org/jira/browse/SOLR-13714?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16928803#comment-16928803 ] ASF subversion and git services commented on SOLR-13714: Commit 0ce635ec01e9d3ce04a5fbf5d472ea9d5d28bfee in lucene-solr's branch refs/heads/master from Michael Gibney [ https://gitbox.apache.org/repos/asf?p=lucene-solr.git;h=0ce635e ] SOLR-13714: Correct refguide regarding shardHandlerFactory solrconfig.xml element (#843) > Incorrect shardHandlerFactory config element documented in refguide for > "distributed requests" > -- > > Key: SOLR-13714 > URL: https://issues.apache.org/jira/browse/SOLR-13714 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) > Components: documentation >Affects Versions: 7.7.2, 8.1.1 >Reporter: Michael Gibney >Priority: Trivial > Time Spent: 0.5h > Remaining Estimate: 0h > > Reference guide documentation is inconsistent with respect to configuration > of {{shardHandlerFactory}} in {{solrconfig.xml}}. > The correct config element name is "{{shardHandlerFactory}}", as reflected in > code [in > SolrXmlConfig.java|https://github.com/apache/lucene-solr/blob/301ea0e/solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java#L460] > and [in > SearchHandler.java|https://github.com/apache/lucene-solr/blob/43fc05c/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java#L97]. > The element name is documented correctly in the [refGuide page for "Format of > solr.xml"|https://lucene.apache.org/solr/guide/8_1/format-of-solr-xml.html#the-shardhandlerfactory-element], > but it is documented incorrectly (as "{{shardHandler}}", not > "{{shardHandlerFactory}}" in the [refGuide page for "Distributed > Requests"|https://lucene.apache.org/solr/guide/8_1/distributed-requests.html#configuring-the-shardhandlerfactory]. 
-- This message was sent by Atlassian Jira (v8.3.2#803003) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[GitHub] [lucene-solr] ctargett merged pull request #843: SOLR-13714: Correct refguide regarding shardHandlerFactory solrconfig…
ctargett merged pull request #843: SOLR-13714: Correct refguide regarding shardHandlerFactory solrconfig… URL: https://github.com/apache/lucene-solr/pull/843 This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[GitHub] [lucene-solr] ctargett commented on issue #843: SOLR-13714: Correct refguide regarding shardHandlerFactory solrconfig…
ctargett commented on issue #843: SOLR-13714: Correct refguide regarding shardHandlerFactory solrconfig… URL: https://github.com/apache/lucene-solr/pull/843#issuecomment-530944197 +1, thanks for fixing this. This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-SmokeRelease-master - Build # 1450 - Failure
Build: https://builds.apache.org/job/Lucene-Solr-SmokeRelease-master/1450/ No tests ran. Build Log: [...truncated 24519 lines...] [asciidoctor:convert] asciidoctor: ERROR: about-this-guide.adoc: line 1: invalid part, must have at least one section (e.g., chapter, appendix, etc.) [asciidoctor:convert] asciidoctor: ERROR: solr-glossary.adoc: line 1: invalid part, must have at least one section (e.g., chapter, appendix, etc.) [java] Processed 2595 links (2121 relative) to 3660 anchors in 260 files [echo] Validated Links & Anchors via: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/solr/build/solr-ref-guide/bare-bones-html/ -dist-changes: [copy] Copying 4 files to /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/solr/package/changes package: -unpack-solr-tgz: -ensure-solr-tgz-exists: [mkdir] Created dir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/solr/build/solr.tgz.unpacked [untar] Expanding: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/solr/package/solr-9.0.0.tgz into /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/solr/build/solr.tgz.unpacked generate-maven-artifacts: resolve: resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/lucene/top-level-ivy-settings.xml resolve: resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. 
-ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. 
-ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disal
[JENKINS] Lucene-Solr-Tests-master - Build # 3721 - Failure
Build: https://builds.apache.org/job/Lucene-Solr-Tests-master/3721/ All tests passed Build Log: [...truncated 64664 lines...] -ecj-javadoc-lint-src: [mkdir] Created dir: /tmp/ecj644621066 [ecj-lint] Compiling 69 source files to /tmp/ecj644621066 [ecj-lint] invalid Class-Path header in manifest of jar file: /home/jenkins/.ivy2/cache/org.restlet.jee/org.restlet/jars/org.restlet-2.3.0.jar [ecj-lint] invalid Class-Path header in manifest of jar file: /home/jenkins/.ivy2/cache/org.restlet.jee/org.restlet.ext.servlet/jars/org.restlet.ext.servlet-2.3.0.jar [ecj-lint] -- [ecj-lint] 1. ERROR in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/JdbcDataSource.java (at line 28) [ecj-lint] import javax.naming.InitialContext; [ecj-lint]^^^ [ecj-lint] The type javax.naming.InitialContext is not accessible [ecj-lint] -- [ecj-lint] 2. ERROR in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/JdbcDataSource.java (at line 29) [ecj-lint] import javax.naming.NamingException; [ecj-lint] [ecj-lint] The type javax.naming.NamingException is not accessible [ecj-lint] -- [ecj-lint] 3. ERROR in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/JdbcDataSource.java (at line 182) [ecj-lint] c = getFromJndi(initProps, jndiName); [ecj-lint] ^^^ [ecj-lint] The method getFromJndi(Properties, String) from the type new Callable(){} refers to the missing type NamingException [ecj-lint] -- [ecj-lint] 4. 
ERROR in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/JdbcDataSource.java (at line 245) [ecj-lint] private Connection getFromJndi(final Properties initProps, final String jndiName) throws NamingException, [ecj-lint] ^^^ [ecj-lint] NamingException cannot be resolved to a type [ecj-lint] -- [ecj-lint] 5. ERROR in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/JdbcDataSource.java (at line 249) [ecj-lint] InitialContext ctx = new InitialContext(); [ecj-lint] ^^ [ecj-lint] InitialContext cannot be resolved to a type [ecj-lint] -- [ecj-lint] 6. ERROR in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/JdbcDataSource.java (at line 249) [ecj-lint] InitialContext ctx = new InitialContext(); [ecj-lint] ^^ [ecj-lint] InitialContext cannot be resolved to a type [ecj-lint] -- [ecj-lint] 6 problems (6 errors) BUILD FAILED /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/build.xml:634: The following error occurred while executing this line: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/build.xml:101: The following error occurred while executing this line: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build.xml:651: The following error occurred while executing this line: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/common-build.xml:479: The following error occurred while executing this line: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/lucene/common-build.xml:2009: The following error occurred while executing this line: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/lucene/common-build.xml:2048: Compile failed; see the compiler error output for details. 
Total time: 104 minutes 57 seconds Build step 'Invoke Ant' marked build as failure Archiving artifacts Recording test results Email was triggered for: Failure - Any Sending email for trigger: Failure - Any - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Created] (SOLR-13757) termfreq function does not work for (int) point field
Andreas Hubold created SOLR-13757: - Summary: termfreq function does not work for (int) point field Key: SOLR-13757 URL: https://issues.apache.org/jira/browse/SOLR-13757 Project: Solr Issue Type: Bug Security Level: Public (Default Security Level. Issues are Public) Affects Versions: 8.2 Reporter: Andreas Hubold The termfreq function always returns 0 for IntPointField. It used to work with the deprecated TrieIntField, for which IntPointField is documented as replacement. If this cannot be fixed because of the missing terms index, then the reference guide should mention the requirements for the termfreq function, so that users are warned to not use that function with IntPointField. For more information, see also the question on the mailing list: https://mail-archives.apache.org/mod_mbox/lucene-solr-user/201909.mbox/%3Cd81d886b-f9d5-caa0-efaf-ac7679c26c14%40coremedia.com%3E -- This message was sent by Atlassian Jira (v8.3.2#803003) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[GitHub] [lucene-solr] atris commented on issue #877: LUCENE-8978: Maximal Bottom Score Based Early Termination
atris commented on issue #877: LUCENE-8978: Maximal Bottom Score Based Early Termination URL: https://github.com/apache/lucene-solr/pull/877#issuecomment-530818867 cc @jimczi I have not implemented the logic for Paging collector yet -- can do it in a separate PR if this looks fine This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[GitHub] [lucene-solr] atris opened a new pull request #877: LUCENE-8978: Maximal Bottom Score Based Early Termination
atris opened a new pull request #877: LUCENE-8978: Maximal Bottom Score Based Early Termination URL: https://github.com/apache/lucene-solr/pull/877 This commit introduces a mechanism for early termination where, for indices sorted by relevance, hits are collected in per thread queue. Full PQs publish their bottom values and the maximum of them is then used by the corresponding collectors to filter further hits This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[GitHub] [lucene-solr] jimczi commented on issue #854: Shared PQ Based Early Termination for Concurrent Search
jimczi commented on issue #854: Shared PQ Based Early Termination for Concurrent Search URL: https://github.com/apache/lucene-solr/pull/854#issuecomment-530794180 > I was thinking of a class similar to HitsThresholdChecker, allowing shared state, since, as you rightly said, the size of critical section should be small. WDYT? This is a good idea, it could be used to publish the maximum minimum value and collectors would use it to check with their local minimum too. This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-Tests-master - Build # 3719 - Failure
Build: https://builds.apache.org/job/Lucene-Solr-Tests-master/3719/ All tests passed Build Log: [...truncated 64081 lines...] -ecj-javadoc-lint-src: [mkdir] Created dir: /tmp/ecj880195367 [ecj-lint] Compiling 1289 source files to /tmp/ecj880195367 [ecj-lint] Processing annotations [ecj-lint] Annotations processed [ecj-lint] Processing annotations [ecj-lint] No elements to process [ecj-lint] invalid Class-Path header in manifest of jar file: /home/jenkins/.ivy2/cache/org.restlet.jee/org.restlet/jars/org.restlet-2.3.0.jar [ecj-lint] invalid Class-Path header in manifest of jar file: /home/jenkins/.ivy2/cache/org.restlet.jee/org.restlet.ext.servlet/jars/org.restlet.ext.servlet-2.3.0.jar [ecj-lint] -- [ecj-lint] 1. WARNING in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java (at line 219) [ecj-lint] return (NamedList) new JavaBinCodec(resolver).unmarshal(in); [ecj-lint]^^ [ecj-lint] Resource leak: '' is never closed [ecj-lint] -- [ecj-lint] -- [ecj-lint] 2. WARNING in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/java/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java (at line 788) [ecj-lint] throw new UnsupportedOperationException("must add at least 1 node first"); [ecj-lint] ^^ [ecj-lint] Resource leak: 'queryRequest' is not closed at this location [ecj-lint] -- [ecj-lint] 3. WARNING in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/java/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java (at line 794) [ecj-lint] throw new UnsupportedOperationException("must add at least 1 node first"); [ecj-lint] ^^ [ecj-lint] Resource leak: 'queryRequest' is not closed at this location [ecj-lint] -- [ecj-lint] -- [ecj-lint] 4. 
ERROR in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java (at line 19) [ecj-lint] import javax.naming.Context; [ecj-lint] [ecj-lint] The type javax.naming.Context is not accessible [ecj-lint] -- [ecj-lint] 5. ERROR in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java (at line 20) [ecj-lint] import javax.naming.InitialContext; [ecj-lint]^^^ [ecj-lint] The type javax.naming.InitialContext is not accessible [ecj-lint] -- [ecj-lint] 6. ERROR in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java (at line 21) [ecj-lint] import javax.naming.NamingException; [ecj-lint] [ecj-lint] The type javax.naming.NamingException is not accessible [ecj-lint] -- [ecj-lint] 7. ERROR in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java (at line 22) [ecj-lint] import javax.naming.NoInitialContextException; [ecj-lint]^^ [ecj-lint] The type javax.naming.NoInitialContextException is not accessible [ecj-lint] -- [ecj-lint] 8. ERROR in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java (at line 776) [ecj-lint] Context c = new InitialContext(); [ecj-lint] ^^^ [ecj-lint] Context cannot be resolved to a type [ecj-lint] -- [ecj-lint] 9. ERROR in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java (at line 776) [ecj-lint] Context c = new InitialContext(); [ecj-lint] ^^ [ecj-lint] InitialContext cannot be resolved to a type [ecj-lint] -- [ecj-lint] 10. 
ERROR in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java (at line 779) [ecj-lint] } catch (NoInitialContextException e) { [ecj-lint] ^ [ecj-lint] NoInitialContextException cannot be resolved to a type [ecj-lint] -- [ecj-lint] 11. ERROR in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java (at line 781) [ecj-lint] } catch (NamingException e) { [ecj-lint] ^^^ [ecj-lint] NamingException cannot be resolved to a type [ecj-lint] -- [ecj-lint] -- [ecj-lint] 12. WARNING in /home/jenkins/jenkins-slave/workspa
[jira] [Updated] (SOLR-13751) Add BooleanSimilarityFactory to Solr
[ https://issues.apache.org/jira/browse/SOLR-13751?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Andy Webb updated SOLR-13751: - Status: Patch Available (was: Open) > Add BooleanSimilarityFactory to Solr > > > Key: SOLR-13751 > URL: https://issues.apache.org/jira/browse/SOLR-13751 > Project: Solr > Issue Type: New Feature > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Andy Webb >Priority: Minor > Time Spent: 0.5h > Remaining Estimate: 0h > > Solr doesn't expose Lucene's BooleanSimilarity (ref LUCENE-5867) so it's not > available for use in situations where BM25/TF-IDF are not useful. (Fields > using this similarity will likely also set omitNorms and > omitTermFreqAndPositions to true.) > Our use case is ngram-driven suggestions, where the frequency of occurrence > of a particular sequence of characters is not something users would expect to > be taken into account when ordering suggestions. > > Here's my PR: [https://github.com/apache/lucene-solr/pull/867] (I'm at > Activate if anyone would like to talk this through in person.) -- This message was sent by Atlassian Jira (v8.3.2#803003) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-13751) Add BooleanSimilarityFactory to Solr
[ https://issues.apache.org/jira/browse/SOLR-13751?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Andy Webb updated SOLR-13751: - Description: Solr doesn't expose Lucene's BooleanSimilarity (ref LUCENE-5867) so it's not available for use in situations where BM25/TF-IDF are not useful. (Fields using this similarity will likely also set omitNorms and omitTermFreqAndPositions to true.) Our use case is ngram-driven suggestions, where the frequency of occurrence of a particular sequence of characters is not something users would expect to be taken into account when ordering suggestions. Here's my PR: [https://github.com/apache/lucene-solr/pull/867] (I'm at Activate if anyone would like to talk this through in person.) was: Solr doesn't expose Lucene's BooleanSimilarity (ref LUCENE-5867) so it's not available for use in situations where BM25/TF-IDF are not useful. Our use case is ngram-driven suggestions, where the frequency of occurrence of a particular sequence of characters is not something users would expect to be taken into account when ordering suggestions. Here's my draft PR: [https://github.com/apache/lucene-solr/pull/867] (I'm at the pre-Activate Hack Day in Washington DC.) > Add BooleanSimilarityFactory to Solr > > > Key: SOLR-13751 > URL: https://issues.apache.org/jira/browse/SOLR-13751 > Project: Solr > Issue Type: New Feature > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Andy Webb >Priority: Minor > Time Spent: 0.5h > Remaining Estimate: 0h > > Solr doesn't expose Lucene's BooleanSimilarity (ref LUCENE-5867) so it's not > available for use in situations where BM25/TF-IDF are not useful. (Fields > using this similarity will likely also set omitNorms and > omitTermFreqAndPositions to true.) > Our use case is ngram-driven suggestions, where the frequency of occurrence > of a particular sequence of characters is not something users would expect to > be taken into account when ordering suggestions. 
> > Here's my PR: [https://github.com/apache/lucene-solr/pull/867] (I'm at > Activate if anyone would like to talk this through in person.) -- This message was sent by Atlassian Jira (v8.3.2#803003) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[GitHub] [lucene-solr] atris opened a new pull request #876: Use The Passed In Threshold Value in doConcurrentSearchWithThreshold
atris opened a new pull request #876: Use The Passed In Threshold Value in doConcurrentSearchWithThreshold URL: https://github.com/apache/lucene-solr/pull/876 This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Created] (LUCENE-8978) "Max Bottom" Based Early Termination For Concurrent Search
Atri Sharma created LUCENE-8978: --- Summary: "Max Bottom" Based Early Termination For Concurrent Search Key: LUCENE-8978 URL: https://issues.apache.org/jira/browse/LUCENE-8978 Project: Lucene - Core Issue Type: Improvement Reporter: Atri Sharma When running a search concurrently, collectors which have collected the number of hits requested locally i.e. their local priority queue is full can then globally publish their bottom hit's score, and other collectors can then use that score as the filter. If multiple collectors have full priority queues, the maximum of all bottom scores will be considered as the global bottom score. -- This message was sent by Atlassian Jira (v8.3.2#803003) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-13734) JWTAuthPlugin to support multiple issuers
[ https://issues.apache.org/jira/browse/SOLR-13734?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16928363#comment-16928363 ] Jan Høydahl commented on SOLR-13734: [~noble.paul] or others, do you have time for a quick look? In particular I want feedback on the config syntax change and back-compat. I have focused on supporting both new-style and old-style issuer configuration and currently print a warning if old-style (top level json keys instead of inside 'issuers' array) is used. Although documentation focus on new-style, I do no plan for removal of the old syntax. Should we make the two equal and not print deprecation warnings? At minimum we need to keep support until SOLR-13744 is done. Also I'd like feedback on whether the CHANGES.txt and RefGuide page is clear both for old and new users of the plugin. > JWTAuthPlugin to support multiple issuers > - > > Key: SOLR-13734 > URL: https://issues.apache.org/jira/browse/SOLR-13734 > Project: Solr > Issue Type: New Feature > Security Level: Public(Default Security Level. Issues are Public) > Components: security >Reporter: Jan Høydahl >Assignee: Jan Høydahl >Priority: Major > Labels: JWT, authentication, pull-request-available > Fix For: 8.3 > > Attachments: jwt-authentication-plugin.html > > Time Spent: 20m > Remaining Estimate: 0h > > In some large enterprise environments, there is more than one [Identity > Provider|https://en.wikipedia.org/wiki/Identity_provider] to issue tokens for > users. The equivalent example from the public internet is logging in to a > website and choose between multiple pre-defined IdPs (such as Google, GitHub, > Facebook etc) in the Oauth2/OIDC flow. > In the enterprise the IdPs could be public ones but most likely they will be > private IdPs in various networks inside the enterprise. Users will interact > with a search application, e.g. 
one providing enterprise wide search, and > will authenticate with one out of several IdPs depending on their local > affiliation. The search app will then request an access token (JWT) for the > user and issue requests to Solr using that token. > The JWT plugin currently supports exactly one IdP. This JIRA will extend > support for multiple IdPs for access token validation only. To limit the > scope of this Jira, Admin UI login must still happen to the "primary" IdP. > Supporting multiple IdPs for Admin UI login can be done in followup issues. -- This message was sent by Atlassian Jira (v8.3.2#803003) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[GitHub] [lucene-solr] thomaswoeckinger commented on issue #855: SOLR-13739: Improve performance on huge schema updates
thomaswoeckinger commented on issue #855: SOLR-13739: Improve performance on huge schema updates URL: https://github.com/apache/lucene-solr/pull/855#issuecomment-530695879 @dsmiley ready to merge? This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org