This is an automated email from the ASF dual-hosted git repository.

dsmiley pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/solr.git


The following commit(s) were added to refs/heads/main by this push:
     new 0ebe0a14ac9 SOLR-17874: Stop using HttpSolrClient in production (#3501)
0ebe0a14ac9 is described below

commit 0ebe0a14ac939de5c1f980a5221a14dfa400d49d
Author: David Smiley <[email protected]>
AuthorDate: Thu Aug 28 01:02:48 2025 -0400

    SOLR-17874: Stop using HttpSolrClient in production (#3501)
    
    Switch remaining usages of Apache HttpClient to use the internally managed 
Jetty HttpClient instance.
    * shard split: the commits
    * SYNCSHARD command/action
    * admin handler (e.g. log level change with nodes param)
    * IterativeMergeStrategy
    * config edit API
    * schema edit API
    * NodesSysPropsCacher: use default client not that of shardHandler
    Apache HttpClient is still used in tests.
    
    Also removed some deprecated internal options for using Apache HttpClient.
---
 solr/CHANGES.txt                                   |  2 +
 solr/benchmark/build.gradle                        |  1 -
 solr/benchmark/gradle.lockfile                     |  2 +-
 .../apache/solr/bench/search/StreamingSearch.java  | 17 +----
 .../java/org/apache/solr/cloud/ZkController.java   |  7 +-
 .../api/collections/CollectionHandlingUtils.java   | 28 ++++---
 .../solr/cloud/api/collections/SplitShardCmd.java  | 12 ++-
 .../org/apache/solr/handler/SolrConfigHandler.java | 86 ++++++++++++----------
 .../solr/handler/admin/AdminHandlersProxy.java     | 35 ++++-----
 .../apache/solr/handler/admin/api/SyncShard.java   |  8 +-
 .../handler/component/IterativeMergeStrategy.java  | 41 +++--------
 .../solr/metrics/reporters/solr/SolrReporter.java  | 85 +--------------------
 .../org/apache/solr/schema/ManagedIndexSchema.java | 63 ++++++++--------
 .../org/apache/solr/update/SolrCmdDistributor.java |  6 +-
 .../solr/client/solrj/io/SolrClientCache.java      | 55 +-------------
 15 files changed, 148 insertions(+), 300 deletions(-)

diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 2317eb97c0c..a347487d8ee 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -213,6 +213,8 @@ Other Changes
 
 * SOLR-17286: When proxying requests to another node, use Jetty HttpClient not 
Apache HttpClient. (David Smiley)
 
+* SOLR-17874: Switch remaining usages of Apache HttpClient to use the 
internally managed Jetty HttpClient instance. (David Smiley)
+
 ==================  9.10.0 ==================
 New Features
 ---------------------
diff --git a/solr/benchmark/build.gradle b/solr/benchmark/build.gradle
index fe0b0f44e22..bf32ee89457 100644
--- a/solr/benchmark/build.gradle
+++ b/solr/benchmark/build.gradle
@@ -46,7 +46,6 @@ dependencies {
   implementation project(':solr:solrj-streaming')
 
   implementation libs.apache.lucene.core
-  implementation libs.apache.httpcomponents.httpclient
   implementation libs.commonsio.commonsio
   implementation libs.dropwizard.metrics.core
   implementation libs.apache.commons.math3
diff --git a/solr/benchmark/gradle.lockfile b/solr/benchmark/gradle.lockfile
index 5028aa85ff0..afb5b1fce29 100644
--- a/solr/benchmark/gradle.lockfile
+++ b/solr/benchmark/gradle.lockfile
@@ -32,7 +32,7 @@ 
com.jayway.jsonpath:json-path:2.9.0=jarValidation,runtimeClasspath,testRuntimeCl
 com.lmax:disruptor:3.4.4=jarValidation,runtimeClasspath,testRuntimeClasspath
 com.tdunning:t-digest:3.3=jarValidation,runtimeClasspath,testRuntimeClasspath
 
commons-cli:commons-cli:1.9.0=jarValidation,runtimeClasspath,testRuntimeClasspath
-commons-codec:commons-codec:1.17.2=compileClasspath,jarValidation,runtimeClasspath,testCompileClasspath,testRuntimeClasspath
+commons-codec:commons-codec:1.17.2=jarValidation,runtimeClasspath,testRuntimeClasspath
 
commons-io:commons-io:2.17.0=compileClasspath,jarValidation,runtimeClasspath,testCompileClasspath,testRuntimeClasspath
 
io.dropwizard.metrics:metrics-annotation:4.2.26=jarValidation,runtimeClasspath,testRuntimeClasspath
 
io.dropwizard.metrics:metrics-core:4.2.26=compileClasspath,jarValidation,runtimeClasspath,testCompileClasspath,testRuntimeClasspath
diff --git 
a/solr/benchmark/src/java/org/apache/solr/bench/search/StreamingSearch.java 
b/solr/benchmark/src/java/org/apache/solr/bench/search/StreamingSearch.java
index 14046644c46..09928af733c 100644
--- a/solr/benchmark/src/java/org/apache/solr/bench/search/StreamingSearch.java
+++ b/solr/benchmark/src/java/org/apache/solr/bench/search/StreamingSearch.java
@@ -28,7 +28,6 @@ import org.apache.solr.bench.MiniClusterState;
 import org.apache.solr.bench.MiniClusterState.MiniClusterBenchState;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.impl.Http2SolrClient;
-import org.apache.solr.client.solrj.impl.HttpClientUtil;
 import org.apache.solr.client.solrj.io.SolrClientCache;
 import org.apache.solr.client.solrj.io.Tuple;
 import org.apache.solr.client.solrj.io.stream.CloudSolrStream;
@@ -98,13 +97,9 @@ public class StreamingSearch {
     public void setupIteration(MiniClusterState.MiniClusterBenchState 
miniClusterState)
         throws SolrServerException, IOException {
       SolrClientCache solrClientCache;
-      if (useHttp1) {
-        var httpClient = HttpClientUtil.createClient(null); // TODO tune 
params?
-        solrClientCache = new SolrClientCache(httpClient);
-      } else {
-        http2SolrClient = newHttp2SolrClient();
-        solrClientCache = new SolrClientCache(http2SolrClient);
-      }
+      // TODO tune params?
+      var client = new Http2SolrClient.Builder().useHttp1_1(useHttp1).build();
+      solrClientCache = new SolrClientCache(client);
 
       streamContext = new StreamContext();
       streamContext.setSolrClientCache(solrClientCache);
@@ -145,10 +140,4 @@ public class StreamingSearch {
       tupleStream.close();
     }
   }
-
-  public static Http2SolrClient newHttp2SolrClient() {
-    // TODO tune params?
-    var builder = new Http2SolrClient.Builder();
-    return builder.build();
-  }
 }
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkController.java 
b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
index 0f4465cb867..5309047943a 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ZkController.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
@@ -61,7 +61,6 @@ import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.cloud.SolrCloudManager;
 import org.apache.solr.client.solrj.impl.CloudHttp2SolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.Http2SolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient.Builder;
 import org.apache.solr.client.solrj.impl.SolrClientCloudManager;
 import org.apache.solr.client.solrj.impl.SolrZkClientTimeout;
@@ -118,7 +117,6 @@ import org.apache.solr.core.NodeRoles;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.core.SolrCoreInitializationException;
 import org.apache.solr.handler.component.HttpShardHandler;
-import org.apache.solr.handler.component.HttpShardHandlerFactory;
 import org.apache.solr.logging.MDCLoggingContext;
 import org.apache.solr.search.SolrIndexSearcher;
 import org.apache.solr.update.UpdateLog;
@@ -393,10 +391,7 @@ public class ZkController implements Closeable {
     }
     this.overseerCollectionQueue = overseer.getCollectionQueue(zkClient);
     this.overseerConfigSetQueue = overseer.getConfigSetQueue(zkClient);
-    final var client =
-        (Http2SolrClient)
-            ((HttpShardHandlerFactory) 
getCoreContainer().getShardHandlerFactory()).getClient();
-    this.sysPropsCacher = new NodesSysPropsCacher(client, zkStateReader);
+    this.sysPropsCacher = new 
NodesSysPropsCacher(cc.getDefaultHttpSolrClient(), zkStateReader);
     assert ObjectReleaseTracker.track(this);
   }
 
diff --git 
a/solr/core/src/java/org/apache/solr/cloud/api/collections/CollectionHandlingUtils.java
 
b/solr/core/src/java/org/apache/solr/cloud/api/collections/CollectionHandlingUtils.java
index 738994a94d5..ba02a828724 100644
--- 
a/solr/core/src/java/org/apache/solr/cloud/api/collections/CollectionHandlingUtils.java
+++ 
b/solr/core/src/java/org/apache/solr/cloud/api/collections/CollectionHandlingUtils.java
@@ -39,7 +39,7 @@ import java.util.stream.Collectors;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrResponse;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.impl.Http2SolrClient;
 import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
 import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.client.solrj.response.UpdateResponse;
@@ -218,14 +218,19 @@ public class CollectionHandlingUtils {
     }
   }
 
-  static void commit(NamedList<Object> results, String slice, Replica 
parentShardLeader) {
+  static void commit(
+      Http2SolrClient solrClient,
+      NamedList<Object> results,
+      String slice,
+      Replica parentShardLeader) {
     log.debug("Calling soft commit to make sub shard updates visible");
     String coreUrl = parentShardLeader.getCoreUrl();
     // HttpShardHandler is hard coded to send a QueryRequest hence we go direct
     // and we force open a searcher so that we have documents to show upon 
switching states
     UpdateResponse updateResponse = null;
     try {
-      updateResponse = softCommit(parentShardLeader.getBaseUrl(), 
parentShardLeader.getCoreName());
+      updateResponse =
+          softCommit(solrClient, parentShardLeader.getBaseUrl(), 
parentShardLeader.getCoreName());
       CollectionHandlingUtils.processResponse(
           results, null, coreUrl, updateResponse, slice, 
Collections.emptySet());
     } catch (Exception e) {
@@ -238,19 +243,12 @@ public class CollectionHandlingUtils {
     }
   }
 
-  static UpdateResponse softCommit(String baseUrl, String coreName)
+  private static UpdateResponse softCommit(
+      Http2SolrClient solrClient, String baseUrl, String coreName)
       throws SolrServerException, IOException {
-
-    try (SolrClient client =
-        new HttpSolrClient.Builder(baseUrl)
-            .withDefaultCollection(coreName)
-            .withConnectionTimeout(30000, TimeUnit.MILLISECONDS)
-            .withSocketTimeout(120000, TimeUnit.MILLISECONDS)
-            .build()) {
-      UpdateRequest ureq = new UpdateRequest();
-      ureq.setAction(AbstractUpdateRequest.ACTION.COMMIT, false, true, true);
-      return ureq.process(client);
-    }
+    UpdateRequest ureq = new UpdateRequest();
+    ureq.setAction(AbstractUpdateRequest.ACTION.COMMIT, false, true, true);
+    return solrClient.requestWithBaseUrl(baseUrl, coreName, ureq);
   }
 
   public static String waitForCoreNodeName(
diff --git 
a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java 
b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
index 12ac6a4d89a..158ecea21d9 100644
--- 
a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
+++ 
b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
@@ -716,7 +716,11 @@ public class SplitShardCmd implements 
CollApiCmds.CollectionApiCommand {
         // state switch as per SOLR-13945 so that sub shards don't come up 
empty, momentarily, after
         // being marked active)
         t = timings.sub("finalCommit");
-        CollectionHandlingUtils.commit(results, slice.get(), 
parentShardLeader);
+        CollectionHandlingUtils.commit(
+            
ccc.getCoreContainer().getUpdateShardHandler().getUpdateOnlyHttpClient(),
+            results,
+            slice.get(),
+            parentShardLeader);
         t.stop();
         // switch sub shard states to 'active'
         log.info("Replication factor is 1 so switching shard states");
@@ -796,7 +800,11 @@ public class SplitShardCmd implements 
CollApiCmds.CollectionApiCommand {
       // when the sub-shard replicas come up
       if (repFactor > 1) {
         t = timings.sub("finalCommit");
-        CollectionHandlingUtils.commit(results, slice.get(), 
parentShardLeader);
+        CollectionHandlingUtils.commit(
+            
ccc.getCoreContainer().getUpdateShardHandler().getUpdateOnlyHttpClient(),
+            results,
+            slice.get(),
+            parentShardLeader);
         t.stop();
       }
 
diff --git a/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java 
b/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java
index d298d69fb0a..e1f9e030232 100644
--- a/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java
@@ -55,9 +55,10 @@ import org.apache.solr.api.AnnotatedApi;
 import org.apache.solr.api.Api;
 import org.apache.solr.api.ApiBag;
 import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.impl.Http2SolrClient;
 import org.apache.solr.client.solrj.io.stream.expr.Expressible;
 import org.apache.solr.client.solrj.request.CollectionRequiringSolrRequest;
+import org.apache.solr.client.solrj.response.SimpleSolrResponse;
 import org.apache.solr.cloud.ZkController;
 import org.apache.solr.cloud.ZkSolrResourceLoader;
 import org.apache.solr.common.MapSerializable;
@@ -843,8 +844,10 @@ public class SolrConfigHandler extends RequestHandlerBase
     // course)
     List<PerReplicaCallable> concurrentTasks = new ArrayList<>();
 
+    var http2SolrClient = 
zkController.getCoreContainer().getDefaultHttpSolrClient();
     for (Replica replica : getActiveReplicas(zkController, collection)) {
-      PerReplicaCallable e = new PerReplicaCallable(replica, prop, 
expectedVersion, maxWaitSecs);
+      PerReplicaCallable e =
+          new PerReplicaCallable(http2SolrClient, replica, prop, 
expectedVersion, maxWaitSecs);
       concurrentTasks.add(e);
     }
     if (concurrentTasks.isEmpty()) return; // nothing to wait for ...
@@ -857,6 +860,7 @@ public class SolrConfigHandler extends RequestHandlerBase
     }
 
     // use an executor service to invoke schema zk version requests in 
parallel with a max wait time
+    // TODO use httpSolrClient.requestAsync instead; it has an executor
     int poolSize = Math.min(concurrentTasks.size(), 10);
     ExecutorService parallelExecutor =
         ExecutorUtil.newMDCAwareFixedThreadPool(
@@ -959,14 +963,21 @@ public class SolrConfigHandler extends RequestHandlerBase
 
   private static class PerReplicaCallable extends 
CollectionRequiringSolrRequest<SolrResponse>
       implements Callable<Boolean> {
+    private final Http2SolrClient solrClient;
     Replica replica;
     String prop;
     int expectedZkVersion;
     Number remoteVersion = null;
     int maxWait;
 
-    PerReplicaCallable(Replica replica, String prop, int expectedZkVersion, 
int maxWait) {
+    PerReplicaCallable(
+        Http2SolrClient solrClient,
+        Replica replica,
+        String prop,
+        int expectedZkVersion,
+        int maxWait) {
       super(METHOD.GET, "/config/" + ZNODEVER, SolrRequestType.ADMIN);
+      this.solrClient = solrClient;
       this.replica = replica;
       this.expectedZkVersion = expectedZkVersion;
       this.prop = prop;
@@ -984,42 +995,41 @@ public class SolrConfigHandler extends RequestHandlerBase
     public Boolean call() throws Exception {
       final RTimer timer = new RTimer();
       int attempts = 0;
-      try (HttpSolrClient solr =
-          new HttpSolrClient.Builder(replica.getBaseUrl())
-              .withDefaultCollection(replica.getCoreName())
-              .build()) {
-        // eventually, this loop will get killed by the ExecutorService's 
timeout
-        while (true) {
-          try {
-            long timeElapsed = (long) timer.getTime() / 1000;
-            if (timeElapsed >= maxWait) {
-              return false;
-            }
-            log.info("Time elapsed : {} secs, maxWait {}", timeElapsed, 
maxWait);
-            Thread.sleep(100);
-            NamedList<Object> resp = solr.httpUriRequest(this).future.get();
-            if (resp != null) {
-              @SuppressWarnings({"rawtypes"})
-              Map m = (Map) resp.get(ZNODEVER);
-              if (m != null) {
-                remoteVersion = (Number) m.get(prop);
-                if (remoteVersion != null && remoteVersion.intValue() >= 
expectedZkVersion) break;
-              }
+      // eventually, this loop will get killed by the ExecutorService's timeout
+      while (true) {
+        try {
+          long timeElapsed = (long) timer.getTime() / 1000;
+          if (timeElapsed >= maxWait) {
+            return false;
+          }
+          log.info("Time elapsed : {} secs, maxWait {}", timeElapsed, maxWait);
+          Thread.sleep(100);
+
+          NamedList<Object> resp =
+              solrClient
+                  .requestWithBaseUrl(replica.getBaseUrl(), 
replica.getCoreName(), this)
+                  .getResponse();
+          if (resp != null) {
+            @SuppressWarnings({"rawtypes"})
+            Map m = (Map) resp.get(ZNODEVER);
+            if (m != null) {
+              remoteVersion = (Number) m.get(prop);
+              if (remoteVersion != null && remoteVersion.intValue() >= 
expectedZkVersion) break;
             }
+          }
 
-            attempts++;
-            if (log.isInfoEnabled()) {
-              log.info(
-                  formatString(
-                      "Could not get expectedVersion {0} from {1} for prop {2} 
  after {3} attempts",
-                      expectedZkVersion, replica.getCoreUrl(), prop, 
attempts));
-            }
-          } catch (Exception e) {
-            if (e instanceof InterruptedException) {
-              break; // stop looping
-            } else {
-              log.warn("Failed to get /schema/zkversion from {} due to: ", 
replica.getCoreUrl(), e);
-            }
+          attempts++;
+          if (log.isInfoEnabled()) {
+            log.info(
+                formatString(
+                    "Could not get expectedVersion {0} from {1} for prop {2}   
after {3} attempts",
+                    expectedZkVersion, replica.getCoreUrl(), prop, attempts));
+          }
+        } catch (Exception e) {
+          if (e instanceof InterruptedException) {
+            break; // stop looping
+          } else {
+            log.warn("Failed to get /schema/zkversion from {} due to: ", 
replica.getCoreUrl(), e);
           }
         }
       }
@@ -1028,7 +1038,7 @@ public class SolrConfigHandler extends RequestHandlerBase
 
     @Override
     protected SolrResponse createResponse(NamedList<Object> namedList) {
-      return null;
+      return new SimpleSolrResponse();
     }
   }
 
diff --git 
a/solr/core/src/java/org/apache/solr/handler/admin/AdminHandlersProxy.java 
b/solr/core/src/java/org/apache/solr/handler/admin/AdminHandlersProxy.java
index 2d5b8d9f6f2..0e0806022c7 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/AdminHandlersProxy.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/AdminHandlersProxy.java
@@ -21,25 +21,23 @@ import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.net.URI;
 import java.util.Arrays;
-import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
-import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.GenericSolrRequest;
 import org.apache.solr.cloud.ZkController;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.Pair;
 import org.apache.solr.core.CoreContainer;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.SolrQueryResponse;
@@ -101,16 +99,14 @@ public class AdminHandlersProxy {
 
     ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
     params.remove(PARAM_NODES);
-    Map<String, Pair<Future<NamedList<Object>>, SolrClient>> responses = new 
HashMap<>();
+    Map<String, Future<NamedList<Object>>> responses = new LinkedHashMap<>();
     for (String node : nodes) {
       responses.put(node, callRemoteNode(node, pathStr, params, 
container.getZkController()));
     }
 
-    for (Map.Entry<String, Pair<Future<NamedList<Object>>, SolrClient>> entry :
-        responses.entrySet()) {
+    for (Map.Entry<String, Future<NamedList<Object>>> entry : 
responses.entrySet()) {
       try {
-        NamedList<Object> resp = entry.getValue().first().get(10, 
TimeUnit.SECONDS);
-        entry.getValue().second().close();
+        NamedList<Object> resp = entry.getValue().get(10, TimeUnit.SECONDS);
         rsp.add(entry.getKey(), resp);
       } catch (ExecutionException ee) {
         log.warn("Exception when fetching result from node {}", 
entry.getKey(), ee);
@@ -125,18 +121,17 @@ public class AdminHandlersProxy {
     return true;
   }
 
-  /**
-   * Makes a remote request and returns a future and the solr client. The 
caller is responsible for
-   * closing the client
-   */
-  public static Pair<Future<NamedList<Object>>, SolrClient> callRemoteNode(
-      String nodeName, String endpoint, SolrParams params, ZkController 
zkController)
+  /** Makes a remote request asynchronously. */
+  public static CompletableFuture<NamedList<Object>> callRemoteNode(
+      String nodeName, String uriPath, SolrParams params, ZkController 
zkController)
       throws IOException, SolrServerException {
-    log.debug("Proxying {} request to node {}", endpoint, nodeName);
+    log.debug("Proxying {} request to node {}", uriPath, nodeName);
     URI baseUri = 
URI.create(zkController.zkStateReader.getBaseUrlForNodeName(nodeName));
-    HttpSolrClient solr = new 
HttpSolrClient.Builder(baseUri.toString()).build();
-    SolrRequest<?> proxyReq = new GenericSolrRequest(SolrRequest.METHOD.GET, 
endpoint, params);
-    HttpSolrClient.HttpUriRequestResponse proxyResp = 
solr.httpUriRequest(proxyReq);
-    return new Pair<>(proxyResp.future, solr);
+    SolrRequest<?> proxyReq = new GenericSolrRequest(SolrRequest.METHOD.GET, 
uriPath, params);
+
+    return zkController
+        .getCoreContainer()
+        .getDefaultHttpSolrClient()
+        .requestWithBaseUrl(baseUri.toString(), c -> c.requestAsync(proxyReq));
   }
 }
diff --git 
a/solr/core/src/java/org/apache/solr/handler/admin/api/SyncShard.java 
b/solr/core/src/java/org/apache/solr/handler/admin/api/SyncShard.java
index 8dbe4921c9a..1bf3dc56f4e 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/api/SyncShard.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/api/SyncShard.java
@@ -28,7 +28,7 @@ import org.apache.solr.client.api.endpoint.SyncShardApi;
 import org.apache.solr.client.api.model.SolrJerseyResponse;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.impl.Http2SolrClient;
 import org.apache.solr.client.solrj.request.CoreAdminRequest;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
@@ -80,9 +80,9 @@ public class SyncShard extends AdminAPIBase implements 
SyncShardApi {
     Replica leader = docCollection.getLeader(shardName);
 
     try (SolrClient client =
-        new HttpSolrClient.Builder(leader.getBaseUrl())
-            .withConnectionTimeout(15000, TimeUnit.MILLISECONDS)
-            .withSocketTimeout(60000, TimeUnit.MILLISECONDS)
+        new Http2SolrClient.Builder(leader.getBaseUrl())
+            .withHttpClient(coreContainer.getDefaultHttpSolrClient())
+            .withIdleTimeout(60000, TimeUnit.MILLISECONDS)
             .build()) {
       CoreAdminRequest.RequestSyncShard reqSyncShard = new 
CoreAdminRequest.RequestSyncShard();
       reqSyncShard.setCollection(collection);
diff --git 
a/solr/core/src/java/org/apache/solr/handler/component/IterativeMergeStrategy.java
 
b/solr/core/src/java/org/apache/solr/handler/component/IterativeMergeStrategy.java
index 41df454d7b6..7fb538315ff 100644
--- 
a/solr/core/src/java/org/apache/solr/handler/component/IterativeMergeStrategy.java
+++ 
b/solr/core/src/java/org/apache/solr/handler/component/IterativeMergeStrategy.java
@@ -18,17 +18,13 @@ package org.apache.solr.handler.component;
 
 import static org.apache.solr.common.params.CommonParams.DISTRIB;
 
-import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.impl.HttpClientUtil;
-import org.apache.solr.client.solrj.impl.HttpSolrClient.Builder;
+import org.apache.solr.client.solrj.impl.Http2SolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrDocumentList;
@@ -37,31 +33,27 @@ import org.apache.solr.common.util.ExecutorUtil;
 import org.apache.solr.common.util.SolrNamedThreadFactory;
 import org.apache.solr.common.util.URLUtil;
 import org.apache.solr.search.SolrIndexSearcher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 public abstract class IterativeMergeStrategy implements MergeStrategy {
 
   protected volatile ExecutorService executorService;
 
-  protected volatile CloseableHttpClient httpClient;
-
-  private static final Logger log = 
LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  protected volatile Http2SolrClient httpSolrClient;
 
   @Override
   public void merge(ResponseBuilder rb, ShardRequest sreq) {
     rb._responseDocs = new SolrDocumentList(); // Null pointers will occur 
otherwise.
     rb.onePassDistributedQuery = true; // Turn off the second pass distributed.
+    httpSolrClient = rb.req.getCoreContainer().getDefaultHttpSolrClient();
+    // TODO use httpSolrClient.requestAsync instead; it has an executor
     executorService =
         ExecutorUtil.newMDCAwareCachedThreadPool(
             new SolrNamedThreadFactory("IterativeMergeStrategy"));
-    httpClient = getHttpClient();
     try {
       process(rb, sreq);
     } catch (Exception e) {
       throw new RuntimeException(e);
     } finally {
-      HttpClientUtil.close(httpClient);
       executorService.shutdownNow();
     }
   }
@@ -85,20 +77,16 @@ public abstract class IterativeMergeStrategy implements 
MergeStrategy {
   public void handleMergeFields(ResponseBuilder rb, SolrIndexSearcher 
searcher) {}
 
   public class CallBack implements Callable<CallBack> {
-    private SolrClient solrClient;
+    private final String shardBaseUrl;
+    private final String shardCoreName;
+
     private QueryRequest req;
     private QueryResponse response;
     private ShardResponse originalShardResponse;
 
     public CallBack(ShardResponse originalShardResponse, QueryRequest req) {
-      final String shardBaseUrl = 
URLUtil.extractBaseUrl(originalShardResponse.getShardAddress());
-      final String shardCoreName =
-          
URLUtil.extractCoreFromCoreUrl(originalShardResponse.getShardAddress());
-      this.solrClient =
-          new Builder(shardBaseUrl)
-              .withDefaultCollection(shardCoreName)
-              .withHttpClient(httpClient)
-              .build();
+      this.shardBaseUrl = 
URLUtil.extractBaseUrl(originalShardResponse.getShardAddress());
+      this.shardCoreName = 
URLUtil.extractCoreFromCoreUrl(originalShardResponse.getShardAddress());
       this.req = req;
       this.originalShardResponse = originalShardResponse;
       req.setMethod(SolrRequest.METHOD.POST);
@@ -116,7 +104,7 @@ public abstract class IterativeMergeStrategy implements 
MergeStrategy {
 
     @Override
     public CallBack call() throws Exception {
-      this.response = req.process(solrClient);
+      response = httpSolrClient.requestWithBaseUrl(shardBaseUrl, 
shardCoreName, req);
       return this;
     }
   }
@@ -134,13 +122,4 @@ public abstract class IterativeMergeStrategy implements 
MergeStrategy {
   }
 
   protected abstract void process(ResponseBuilder rb, ShardRequest sreq) 
throws Exception;
-
-  private CloseableHttpClient getHttpClient() {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(HttpClientUtil.PROP_MAX_CONNECTIONS, 128);
-    params.set(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, 32);
-    CloseableHttpClient httpClient = HttpClientUtil.createClient(params);
-
-    return httpClient;
-  }
 }
diff --git 
a/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrReporter.java 
b/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrReporter.java
index 4ee771c730b..b66b94bc4df 100644
--- 
a/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrReporter.java
+++ 
b/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrReporter.java
@@ -39,7 +39,6 @@ import java.util.function.Supplier;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.regex.PatternSyntaxException;
-import org.apache.http.client.HttpClient;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.io.SolrClientCache;
 import org.apache.solr.client.solrj.request.UpdateRequest;
@@ -176,7 +175,7 @@ public class SolrReporter extends ScheduledReporter {
      * Default is false.
      *
      * @param cloudClient use CloudSolrClient when true, {@link
-     *     org.apache.solr.client.solrj.impl.HttpSolrClient} otherwise.
+     *     org.apache.solr.client.solrj.impl.Http2SolrClient} otherwise.
      * @return {@code this}
      */
     public Builder cloudClient(boolean cloudClient) {
@@ -262,34 +261,6 @@ public class SolrReporter extends ScheduledReporter {
       return this;
     }
 
-    /**
-     * Build it.
-     *
-     * @param client an instance of {@link HttpClient} to be used for making 
calls.
-     * @param urlProvider function that returns the base URL of Solr instance 
to target. May return
-     *     null to indicate that reporting should be skipped. Note: this 
function will be called
-     *     every time just before report is sent.
-     * @return configured instance of reporter
-     * @deprecated use {@link #build(SolrClientCache, Supplier)} instead.
-     */
-    @Deprecated
-    public SolrReporter build(HttpClient client, Supplier<String> urlProvider) 
{
-      return new SolrReporter(
-          client,
-          urlProvider,
-          metricManager,
-          reports,
-          handler,
-          reporterId,
-          rateUnit,
-          durationUnit,
-          params,
-          skipHistograms,
-          skipAggregateValues,
-          cloudClient,
-          compact);
-    }
-
     /**
      * Build it.
      *
@@ -367,60 +338,6 @@ public class SolrReporter extends ScheduledReporter {
   // We delegate to registries anyway, so having a dummy registry is harmless.
   private static final MetricRegistry dummyRegistry = new MetricRegistry();
 
-  // back-compat constructor
-
-  /**
-   * Create a SolrReporter instance.
-   *
-   * @param httpClient HttpClient to use for constructing SolrClient instances.
-   * @param urlProvider what URL to send to.
-   * @param metricManager metric manager
-   * @param metrics metric specifications to report
-   * @param handler handler name to report to
-   * @param reporterId my reporter id
-   * @param rateUnit rate unit
-   * @param durationUnit duration unit
-   * @param params request parameters
-   * @param skipHistograms if true then don't send histogram metrics
-   * @param skipAggregateValues if true then don't send aggregate metrics' 
individual values
-   * @param cloudClient if true then use CloudSolrClient, plain HttpSolrClient 
otherwise.
-   * @param compact if true then use compact representation.
-   * @deprecated use {@link SolrReporter#SolrReporter(SolrClientCache, 
boolean, Supplier,
-   *     SolrMetricManager, List, String, String, TimeUnit, TimeUnit, 
SolrParams, boolean, boolean,
-   *     boolean, boolean)} instead.
-   */
-  @Deprecated
-  public SolrReporter(
-      HttpClient httpClient,
-      Supplier<String> urlProvider,
-      SolrMetricManager metricManager,
-      List<Report> metrics,
-      String handler,
-      String reporterId,
-      TimeUnit rateUnit,
-      TimeUnit durationUnit,
-      SolrParams params,
-      boolean skipHistograms,
-      boolean skipAggregateValues,
-      boolean cloudClient,
-      boolean compact) {
-    this(
-        new SolrClientCache(httpClient),
-        true,
-        urlProvider,
-        metricManager,
-        metrics,
-        handler,
-        reporterId,
-        rateUnit,
-        durationUnit,
-        params,
-        skipHistograms,
-        skipAggregateValues,
-        cloudClient,
-        compact);
-  }
-
   /**
    * Create a SolrReporter instance.
    *
diff --git a/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java 
b/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java
index a6eed580e75..053427b9eb3 100644
--- a/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java
+++ b/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java
@@ -41,8 +41,8 @@ import java.util.concurrent.TimeUnit;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.Version;
 import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CollectionRequiringSolrRequest;
+import org.apache.solr.client.solrj.response.SimpleSolrResponse;
 import org.apache.solr.cloud.ZkController;
 import org.apache.solr.cloud.ZkSolrResourceLoader;
 import org.apache.solr.common.SolrException;
@@ -251,6 +251,7 @@ public final class ManagedIndexSchema extends IndexSchema {
     }
 
     // use an executor service to invoke schema zk version requests in 
parallel with a max wait time
+    // TODO use httpSolrClient.requestAsync instead; it has an executor
     int poolSize = Math.min(concurrentTasks.size(), 10);
     ExecutorService parallelExecutor =
         ExecutorUtil.newMDCAwareFixedThreadPool(
@@ -372,44 +373,46 @@ public final class ManagedIndexSchema extends IndexSchema 
{
     @Override
     public Integer call() throws Exception {
       int remoteVersion = -1;
-      try (HttpSolrClient solr =
-          new 
HttpSolrClient.Builder(baseUrl).withDefaultCollection(coreName).build()) {
-        // eventually, this loop will get killed by the ExecutorService's 
timeout
-        while (remoteVersion == -1
-            || (remoteVersion < expectedZkVersion
-                && !zkController.getCoreContainer().isShutDown())) {
-          try {
-            HttpSolrClient.HttpUriRequestResponse mrr = 
solr.httpUriRequest(this);
-            NamedList<Object> zkversionResp = mrr.future.get();
-            if (zkversionResp != null) remoteVersion = (Integer) 
zkversionResp.get("zkversion");
-
-            if (remoteVersion < expectedZkVersion) {
-              // rather than waiting and re-polling, let's be proactive and 
tell the replica
-              // to refresh its schema from ZooKeeper, if that fails, then the
-              // Thread.sleep(1000); // slight delay before requesting version 
again
-              log.error(
-                  "Replica {} returned schema version {} and has not applied 
schema version {}",
-                  coreName,
-                  remoteVersion,
-                  expectedZkVersion);
-            }
 
-          } catch (Exception e) {
-            if (e instanceof InterruptedException) {
-              Thread.currentThread().interrupt();
-              break; // stop looping
-            } else {
-              log.warn("Failed to get /schema/zkversion from {} due to: ", 
baseUrl, e);
-            }
+      // eventually, this loop will get killed by the ExecutorService's timeout
+      while (remoteVersion == -1
+          || (remoteVersion < expectedZkVersion && 
!zkController.getCoreContainer().isShutDown())) {
+        try {
+          NamedList<Object> zkversionResp =
+              zkController
+                  .getCoreContainer()
+                  .getDefaultHttpSolrClient()
+                  .requestWithBaseUrl(baseUrl, coreName, this)
+                  .getResponse();
+          if (zkversionResp != null) remoteVersion = (Integer) 
zkversionResp.get("zkversion");
+
+          if (remoteVersion < expectedZkVersion) {
+            // rather than waiting and re-polling, let's be proactive and tell 
the replica
+            // to refresh its schema from ZooKeeper, if that fails, then the
+            // Thread.sleep(1000); // slight delay before requesting version 
again
+            log.error(
+                "Replica {} returned schema version {} and has not applied 
schema version {}",
+                coreName,
+                remoteVersion,
+                expectedZkVersion);
+          }
+
+        } catch (Exception e) {
+          if (e instanceof InterruptedException) {
+            Thread.currentThread().interrupt();
+            break; // stop looping
+          } else {
+            log.warn("Failed to get /schema/zkversion from {} due to: ", 
baseUrl, e);
           }
         }
       }
+
       return remoteVersion;
     }
 
     @Override
     protected SolrResponse createResponse(NamedList<Object> namedList) {
-      return null;
+      return new SimpleSolrResponse();
     }
   }
 
diff --git a/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java 
b/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
index 0c02662299b..287c4857d95 100644
--- a/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
+++ b/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
@@ -35,7 +35,7 @@ import java.util.concurrent.Future;
 import org.apache.http.NoHttpResponseException;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient;
+import org.apache.solr.client.solrj.impl.ConcurrentUpdateHttp2SolrClient;
 import org.apache.solr.client.solrj.impl.JavaBinResponseParser;
 import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
 import org.apache.solr.client.solrj.request.UpdateRequest;
@@ -483,8 +483,8 @@ public class SolrCmdDistributor implements Closeable {
     /**
      * NOTE: This is the request that happened to be executed when this error 
was <b>triggered</b>
      * the error, but because of how {@link StreamingSolrClients} uses {@link
-     * ConcurrentUpdateSolrClient} it might not actaully be the request that 
<b>caused</b> the error
-     * -- multiple requests are merged &amp; processed as a sequential batch.
+     * ConcurrentUpdateHttp2SolrClient} it might not actually be the request 
that <b>caused</b> the
+     * error -- multiple requests are merged &amp; processed as a sequential 
batch.
      */
     public Req req;
 
diff --git 
a/solr/solrj-streaming/src/java/org/apache/solr/client/solrj/io/SolrClientCache.java
 
b/solr/solrj-streaming/src/java/org/apache/solr/client/solrj/io/SolrClientCache.java
index 7e0756a0fa8..011f86555ac 100644
--- 
a/solr/solrj-streaming/src/java/org/apache/solr/client/solrj/io/SolrClientCache.java
+++ 
b/solr/solrj-streaming/src/java/org/apache/solr/client/solrj/io/SolrClientCache.java
@@ -28,11 +28,9 @@ import java.util.concurrent.atomic.AtomicReference;
 import org.apache.http.client.HttpClient;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.impl.CloudHttp2SolrClient;
-import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.Http2SolrClient;
 import org.apache.solr.client.solrj.impl.HttpClientUtil;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.impl.SolrClientBuilder;
 import org.apache.solr.common.AlreadyClosedException;
 import org.apache.solr.common.util.IOUtils;
@@ -53,24 +51,15 @@ public class SolrClientCache implements Closeable {
   private String basicAuthCredentials = null; // Only support with the 
http2SolrClient
 
   private final Map<String, SolrClient> solrClients = new HashMap<>();
-  private final HttpClient apacheHttpClient;
   private final Http2SolrClient http2SolrClient;
   private final AtomicBoolean isClosed = new AtomicBoolean(false);
   private final AtomicReference<String> defaultZkHost = new 
AtomicReference<>();
 
   public SolrClientCache() {
-    this.apacheHttpClient = null;
-    this.http2SolrClient = null;
-  }
-
-  @Deprecated(since = "9.0")
-  public SolrClientCache(HttpClient apacheHttpClient) {
-    this.apacheHttpClient = apacheHttpClient;
     this.http2SolrClient = null;
   }
 
   public SolrClientCache(Http2SolrClient http2SolrClient) {
-    this.apacheHttpClient = null;
     this.http2SolrClient = http2SolrClient;
   }
 
@@ -103,33 +92,12 @@ public class SolrClientCache implements Closeable {
     boolean canUseACLs =
         
Optional.ofNullable(defaultZkHost.get()).map(zkHostNoChroot::equals).orElse(false);
 
-    final CloudSolrClient client;
-    if (apacheHttpClient != null) {
-      client = newCloudLegacySolrClient(zkHost, apacheHttpClient, canUseACLs);
-    } else {
-      client = newCloudHttp2SolrClient(zkHost, http2SolrClient, canUseACLs, 
basicAuthCredentials);
-    }
+    final var client =
+        newCloudHttp2SolrClient(zkHost, http2SolrClient, canUseACLs, 
basicAuthCredentials);
     solrClients.put(zkHost, client);
     return client;
   }
 
-  @Deprecated
-  private static CloudSolrClient newCloudLegacySolrClient(
-      String zkHost, HttpClient httpClient, boolean canUseACLs) {
-    final List<String> hosts = List.of(zkHost);
-    var builder = new CloudLegacySolrClient.Builder(hosts, Optional.empty());
-    builder.canUseZkACLs(canUseACLs);
-    adjustTimeouts(builder, httpClient);
-    var client = builder.build();
-    try {
-      client.connect();
-    } catch (Exception e) {
-      IOUtils.closeQuietly(client);
-      throw e;
-    }
-    return client;
-  }
-
   private static CloudHttp2SolrClient newCloudHttp2SolrClient(
       String zkHost,
       Http2SolrClient http2SolrClient,
@@ -166,27 +134,12 @@ public class SolrClientCache implements Closeable {
     if (solrClients.containsKey(baseUrl)) {
       return solrClients.get(baseUrl);
     }
-    final SolrClient client;
-    if (apacheHttpClient != null) {
-      client = newHttpSolrClient(baseUrl, apacheHttpClient);
-    } else {
-      client = newHttp2SolrClientBuilder(baseUrl, http2SolrClient, 
basicAuthCredentials).build();
-    }
+    final var client =
+        newHttp2SolrClientBuilder(baseUrl, http2SolrClient, 
basicAuthCredentials).build();
     solrClients.put(baseUrl, client);
     return client;
   }
 
-  @Deprecated
-  private static SolrClient newHttpSolrClient(String url, HttpClient 
httpClient) {
-    final var builder =
-        (URLUtil.isBaseUrl(url))
-            ? new HttpSolrClient.Builder(url)
-            : new HttpSolrClient.Builder(URLUtil.extractBaseUrl(url))
-                .withDefaultCollection(URLUtil.extractCoreFromCoreUrl(url));
-    adjustTimeouts(builder, httpClient);
-    return builder.build();
-  }
-
   @Deprecated
   private static void adjustTimeouts(SolrClientBuilder<?> builder, HttpClient 
httpClient) {
     builder.withHttpClient(httpClient);


Reply via email to