I think this is a valid reproduction. The code below produces the following output on stdout. The maximum number of connections for the specific route is set to 200; the default per-route limit for the connection pool manager is 50.
[Tue Feb 23 17:05:11 PST 2016]static http-deliver init starting... [Tue Feb 23 17:05:11 PST 2016]init hc ssl context... [Tue Feb 23 17:05:12 PST 2016]init hc 'allow-all' ssl socket factory... [Tue Feb 23 17:05:12 PST 2016]init hc 'allow-all' ssl registry... [Tue Feb 23 17:05:12 PST 2016]init hc 'allow-all' connection manager... [Tue Feb 23 17:05:12 PST 2016]creating common http-client:; conn-timeout=120000ms; conns-max=100000; so-timeout=120000ms [Tue Feb 23 17:05:12 PST 2016]starting http-client connection monitor thread... [Tue Feb 23 17:05:12 PST 2016]http-client connection pool monitor started. logs specific route max [Tue Feb 23 17:05:12 PST 2016]set route max-conns:; max-allowed=200; protocol-host-port=https://lx01796:2443; httpRoute={}->https://lx01796:2443 pool monitor logs stats from pool manager. value is 50 rather than 200 [Tue Feb 23 17:05:13 PST 2016]emit telemetry message: type=hc-conn-pool; route=lx01796; available=0; active=1; blocking=0; max-allowed=50; server-nm=localhost:0000 [Tue Feb 23 17:05:13 PST 2016]emit telemetry message: type=hc-conn-pool; route=all-routes; available=0; active=1; blocking=0; max-allowed=100000; server-nm=localhost:0000 [Tue Feb 23 17:05:14 PST 2016]emit telemetry message: type=hc-conn-pool; route=lx01796; available=0; active=1; blocking=0; max-allowed=50; server-nm=localhost:0000 [Tue Feb 23 17:05:14 PST 2016]emit telemetry message: type=hc-conn-pool; route=all-routes; available=0; active=1; blocking=0; max-allowed=100000; server-nm=localhost:0000 [Tue Feb 23 17:05:14 PST 2016]response code: 500 [Tue Feb 23 17:05:15 PST 2016]emit telemetry message: type=hc-conn-pool; route=lx01796; available=0; active=0; blocking=0; max-allowed=50; server-nm=localhost:0000 [Tue Feb 23 17:05:15 PST 2016]emit telemetry message: type=hc-conn-pool; route=all-routes; available=0; active=0; blocking=0; max-allowed=100000; server-nm=localhost:0000 [Tue Feb 23 17:05:16 PST 2016]emit telemetry message: type=hc-conn-pool; route=all-routes; 
available=0; active=0; blocking=0; max-allowed=100000; server-nm=localhost:0000 Test Case Code: package com.sbux.junk; import java.net.URL; import java.nio.charset.Charset; import java.util.Date; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import javax.net.ssl.HostnameVerifier; import javax.net.ssl.SSLContext; import org.apache.http.HttpHost; import org.apache.http.HttpResponse; import org.apache.http.auth.AuthScope; import org.apache.http.auth.UsernamePasswordCredentials; import org.apache.http.client.AuthCache; import org.apache.http.client.CredentialsProvider; import org.apache.http.client.config.CookieSpecs; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.client.protocol.HttpClientContext; import org.apache.http.config.ConnectionConfig; import org.apache.http.config.Registry; import org.apache.http.config.RegistryBuilder; import org.apache.http.config.SocketConfig; import org.apache.http.conn.routing.HttpRoute; import org.apache.http.conn.socket.ConnectionSocketFactory; import org.apache.http.conn.socket.PlainConnectionSocketFactory; import org.apache.http.conn.ssl.NoopHostnameVerifier; import org.apache.http.conn.ssl.SSLConnectionSocketFactory; import org.apache.http.conn.ssl.TrustSelfSignedStrategy; import org.apache.http.impl.auth.BasicScheme; import org.apache.http.impl.client.BasicAuthCache; import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.impl.client.HttpClients; import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; import org.apache.http.pool.PoolStats; import org.apache.http.ssl.SSLContexts; import org.apache.logging.log4j.Marker; import org.apache.logging.log4j.MarkerManager; public class 
HttpClientConnLimitTest { private static PoolingHttpClientConnectionManager globalConnManagerPool; private static Thread globalConnManagerThread; private static CloseableHttpClient globalHttpClient; public static final HostnameVerifier ALLOW_ALL_VERIFIER = NoopHostnameVerifier.INSTANCE; public static final int DEFAULT_MAX_CONNECTIONS = 10000; public static final int DEFAULT_MAX_CONNECTIONS_PER_ROUTE = 50; public static final int DEFAULT_MAX_INACTIVITY_MS = 1000*60; public static final long DEFAULT_MAX_CONN_IDLE_MINUTES = 5; public static final int DEFAULT_DEFAULT_CONNECT_TIMEOUT_MS = 120000; public static final int DEFAULT_DEFAULT_SO_TIMEOUT_MS = 120000; public static final int DEFAULT_DEFAULT_RCV_BUFFER_BYTES = 2048; public static final int DEFAULT_DEFAULT_SND_BUFFER_BYTES = 2048; public static final String DEFAULT_DEFAULT_SOCKET_SO_KEEPALIVE_FLAG = "yes"; public static final String DEFAULT_HAWS_HTTP_CLIENT_CONNECTION_MONITOR = "yes"; private static final AtomicBoolean singletonsInitialized = new AtomicBoolean(false); private static void log(String s) { System.out.println("[" + new Date() + "]" + s); } private static void pause(long l) { try { Thread.sleep(l); } catch(Throwable t) {} } private static void initSingletons() throws Exception { final boolean done = singletonsInitialized.getAndSet(true); if(done) { return; } log("static http-deliver init starting..."); log("init hc ssl context..."); SSLContext sslContext; sslContext = SSLContexts.custom().useProtocol("TLS") .loadTrustMaterial(null, new TrustSelfSignedStrategy()) .build() ; log("init hc 'allow-all' ssl socket factory..."); final SSLConnectionSocketFactory sslConnSocketFactory = new SSLConnectionSocketFactory( sslContext , ALLOW_ALL_VERIFIER ); log("init hc 'allow-all' ssl registry..."); final Registry<ConnectionSocketFactory> registry = RegistryBuilder.<ConnectionSocketFactory>create() .register("http", PlainConnectionSocketFactory.getSocketFactory()) .register("https", sslConnSocketFactory) .build() ; 
log("init hc 'allow-all' connection manager..."); globalConnManagerPool = new PoolingHttpClientConnectionManager( registry ); /* * setup the connection pool manager defaults */ globalConnManagerPool.setDefaultMaxPerRoute(DEFAULT_MAX_CONNECTIONS_PER_ROUTE); globalConnManagerPool.setMaxTotal(100000); globalConnManagerPool.setValidateAfterInactivity(30000); log("creating common http-client:" + "; conn-timeout=" + 120000 + "ms" + "; conns-max=" + 100000 + "; so-timeout=" + 120000 + "ms" ); final RequestConfig requestConfig = RequestConfig.copy(RequestConfig.DEFAULT) .setConnectTimeout(120000) .setConnectionRequestTimeout(120000) .setCookieSpec(CookieSpecs.DEFAULT) .setSocketTimeout(120000) .build() ; /* * create http client for this bean - same for all env's * * each env specific execution will have private http client context */ final Charset charset = Charset.forName("utf-8"); final ConnectionConfig connConfig = ConnectionConfig.copy(ConnectionConfig.DEFAULT) .setCharset(charset) .build() ; // build custom socket config final SocketConfig socketConfig = SocketConfig.copy(SocketConfig.DEFAULT) .setRcvBufSize(2048) .setSndBufSize(2048) .setSoKeepAlive(true) .setSoTimeout(120000) .build() ; final HttpClientBuilder builder = HttpClients.custom() .setConnectionManager(globalConnManagerPool) .setConnectionManagerShared(true) .setConnectionTimeToLive(DEFAULT_MAX_CONN_IDLE_MINUTES, TimeUnit.MINUTES) .setDefaultConnectionConfig(connConfig) .setDefaultRequestConfig(requestConfig) .setMaxConnPerRoute(DEFAULT_MAX_CONNECTIONS_PER_ROUTE) .setDefaultSocketConfig(socketConfig) .setMaxConnTotal(100000) .useSystemProperties() ; /* * disable hostname verification ... 
if needed */ builder.setSSLHostnameVerifier(ALLOW_ALL_VERIFIER); // add CloseableHttpClient to the map globalHttpClient = builder.build(); globalConnManagerThread = new Thread() { final Marker marker = MarkerManager.getMarker("http-pool"); private void cleanupPools() throws Exception { globalConnManagerPool.closeExpiredConnections(); globalConnManagerPool.closeIdleConnections( 5, TimeUnit.MINUTES ); } private void getPoolStats( final PoolingHttpClientConnectionManager cm ) { try { /* * inspect pool by route */ Set<HttpRoute> routes = cm.getRoutes(); if(routes != null && !routes.isEmpty()) { for(final HttpRoute r : routes) { final String hostname = r.getTargetHost().getHostName(); // get pool stats for route final PoolStats ps = cm.getStats(r); final int available = ps.getAvailable(); final int active = ps.getLeased(); final int limit = ps.getMax(); final int blocking = ps.getPending(); String s = "emit telemetry message:" + " type=hc-conn-pool" + "; route=" + hostname + "; available=" + available + "; active=" + active + "; blocking=" + blocking + "; max-allowed=" + limit + "; server-nm=" + "localhost:0000" ; log(s); } } /* * inspect pool globally across all routes */ PoolStats tps = cm.getTotalStats(); int available = tps.getAvailable(); int active = tps.getLeased(); int limit = tps.getMax(); int blocking = tps.getPending(); String s = "emit telemetry message:" + " type=hc-conn-pool" + "; route=all-routes" + "; available=" + available + "; active=" + active + "; blocking=" + blocking + "; max-allowed=" + limit + "; server-nm=" + "localhost:0000" ; log(s); } catch(Throwable t) { log("failed to log http-client connection pool stats: " + t); t.printStackTrace(System.out); } } private void logPoolStats() throws Exception { getPoolStats(globalConnManagerPool); } @Override public void run() { log("http-client connection pool monitor started."); while(true) { pause(1000); try { logPoolStats(); cleanupPools(); } catch(Throwable t) { log("http connection monitor error: " + 
t); t.printStackTrace(System.out); } } } }; log("starting http-client connection monitor thread..."); globalConnManagerThread.setDaemon(true); globalConnManagerThread.start(); } private static void startRequestor() throws Exception { Thread t = new Thread() { private RequestConfig requestConfig; @Override public void run() { final RequestConfig requestConfig = RequestConfig.copy(RequestConfig.DEFAULT) .setAuthenticationEnabled(true) .setConnectTimeout(120000) .setConnectionRequestTimeout(120000) .setExpectContinueEnabled(true) .setRedirectsEnabled(false) .setSocketTimeout(120000) .build() ; URL url=null; try { url = new URL("https://HOSTNAME:PORTNO/GET/SERVICE/URI"); } catch(Throwable t) { log("can't parse url"); t.printStackTrace(System.out); } final String urlText = url.toString(); final String host = url.getHost(); final int portNo = url.getPort(); // == -1 ? url.getDefaultPort() : url.getPort(); final String protocol = url.getProtocol(); final HttpHost httpHost = new HttpHost(host, portNo, protocol); final HttpRoute httpRoute = new HttpRoute(httpHost); globalConnManagerPool.setMaxPerRoute(httpRoute, 200); log("set route max-conns:" + "; max-allowed=" + 200 + "; protocol-host-port=" + protocol + "://" + host + ":" + portNo + "; httpRoute=" + String.valueOf(httpRoute) ); final String username = "USERNAME HERE"; final String password = "PASSWORD HERE"; final AuthCache authCache; final CredentialsProvider credsProvider; final AuthScope authScope = new AuthScope(url.getHost(), portNo); final UsernamePasswordCredentials basic = new UsernamePasswordCredentials(username, password); credsProvider = new BasicCredentialsProvider(); credsProvider.setCredentials(authScope, basic); // create authentication cache for preemptive authorization authCache = new BasicAuthCache(); final BasicScheme basicScheme = new BasicScheme(); authCache.put(httpHost, basicScheme); HttpClientContext httpContext = HttpClientContext.create(); httpContext.setAuthCache(authCache); 
httpContext.setCredentialsProvider(credsProvider); httpContext.setRequestConfig(requestConfig); for(int i=0; i < 100; i++) { pause(250); try { HttpRequestBase method = new HttpGet(urlText); method.setHeader("gws-requestId", "pkut-"+System.currentTimeMillis()); method.setHeader("gws-audit", "pk"); method.setHeader("gws-environment", "ps"); method.setHeader("gws-version", "1"); HttpResponse response = globalHttpClient.execute(method, httpContext); log("response code: " + response.getStatusLine().getStatusCode()); response.getEntity().getContent().close(); } catch(Throwable t) { log("error invoking service: " + t); t.printStackTrace(System.out); } } } }; t.start(); } public static void main(String args[]) throws Exception { initSingletons(); for(int i=0; i < 10; i++) { startRequestor(); } pause(1000*5); } } — Pete From: "ol...@apache.org<mailto:ol...@apache.org>" <ol...@apache.org<mailto:ol...@apache.org>> Reply-To: HttpClient Discussion <httpclient-users@hc.apache.org<mailto:httpclient-users@hc.apache.org>> Date: Tuesday, February 23, 2016 at 11:07 AM To: HttpClient Discussion <httpclient-users@hc.apache.org<mailto:httpclient-users@hc.apache.org>> Subject: Re: pooling connection manager: changing max per route On Tue, 2016-02-23 at 01:11 +0000, Pete Keyes wrote: version: 4.4.1 App Server: TomEE7 We use PoolingHttpClientConnectionManager with the following defaults: * max total connections: 100,000 * default max per route: 50 we create the HttpClient and PoolingHttpClientConnectionManager as static singletons at container startup. 
private static Thread globalConnManagerThread; private static CloseableHttpClient globalHttpClient; static { // create global pooling connection manager globalConnManagerPool = new PoolingHttpClientConnectionManager( registry ); globalConnManagerPool.setDefaultMaxPerRoute(50); globalConnManagerPool.setMaxTotal(100000); // create global HttpClient instance final HttpClientBuilder builder = HttpClients.custom() .setConnectionManager(globalConnManagerPool) .setConnectionManagerShared(true) .setConnectionTimeToLive(5, TimeUnit.MINUTES) .setDefaultConnectionConfig(connConfig) .setDefaultRequestConfig(requestConfig) .setMaxConnPerRoute(50) // <<== default per route is 50 .setDefaultSocketConfig(socketConfig) .setMaxConnTotal(100000) .setRetryHandler(httpRetryHandler) .useSystemProperties() ; Please note that connection level parameters will have no effect when the connection manager is explicitly set. Please have a look at Javadocs. CloseableHttpClient globalHttpClient = builder.build(); } We set the max per specific route with the following code snippet: @PostConstruct public void init() { List<URL> allUrls = getAllUrls(); for(final URL url : allUrls) { HttpServiceConfig hsc = getHttpServiceConfig(url); final String host = url.getHost(); final int portNo = url.getPort() == -1 ? url.getDefaultPort() : url.getPort(); final String protocol = url.getProtocol(); final HttpHost httpHost = new HttpHost(host, portNo, protocol); final HttpRoute httpRoute = new HttpRoute(httpHost); globalConnManagerPool.setMaxPerRoute(httpRoute, 200); // <<== max set to 200 for route log.info(gMarker, "set route max-conns:" + "; max-allowed=" + hsc.getMaxConns() + "; protocol-host-port=" + protocol + "://" + host + ":" + portNo + "; httpRoute=" + String.valueOf(httpRoute) ); } } we see each route specific max connection log message at startup. 
for instance: set route max-conns: conns-max=200; protocol-host-port=https://some-host:443; httpRoute={}->https://some-host:443 we have a background thread that monitors the apache-hc connection pool statistics and emits log messages. we see the following apache-hc connection manager pool “route specific” statistics logged: private void logPoolStats() { Set<HttpRoute> routes = globalConnManagerPool.getRoutes(); for(final HttpRoute r : routes) { final PoolStats ps = cm.getStats(r); final int available = ps.getAvailable(); final int active = ps.getLeased(); final int limit = ps.getMax(); final int blocking = ps.getPending(); String s = "emit statistic:" + " type=hc-conn-pool" + "; route=" + hostname + "; available=" + available + "; active=" + active + "; blocking=" + blocking + "; max-allowed=" + limit + "; server-nm=" + Helpers.getLocalHost() + ":" + Helpers.getServerHttpsPort() ; log.info(marker, s); } } we see log messages like the following from the apache-hc connection pool monitor logging: emit statistic: type=hc-conn-pool; route=some-host; available=1; active=0; blocking=0; max-allowed=50 The connection manager isn’t respecting the per-route connection settings. Above we see that the “max-allowed” per route reported by the pool-manager is 50 for a route that specifically set the max-allowed to 200. Any hints about what we are doing wrong to override the limit on a per-route basis? Could you please try to reproduce the issue with a test case? Oleg --------------------------------------------------------------------- To unsubscribe, e-mail: httpclient-users-unsubscr...@hc.apache.org<mailto:httpclient-users-unsubscr...@hc.apache.org> For additional commands, e-mail: httpclient-users-h...@hc.apache.org<mailto:httpclient-users-h...@hc.apache.org>