On Tue, Jan 08, 2019 at 03:46:48PM +0100, Stefan Sperling wrote:
> mod_proxy fails to compile when APR doesn't have thread support.
> I don't know if this is supposed to be a supported configuration,
> but this problem did not exist with HTTPD 2.2; it showed up in 2.4.
>
> The patch below adds checks for APR_HAS_THREADS and passes test
> builds with both threaded and non-threaded APR from APR's trunk.
> Is this the right approach?
>
> I also found that the http2 module won't build without thread support.
> I can simply omit http2 from my SVN dev builds, for now. But mod_proxy
> is required by mod_dav_svn for SVN's write-through proxy feature.
Any feedback on this? Should I just commit it if I don't hear anything?
> Index: modules/proxy/mod_proxy.h
> ===================================================================
> --- modules/proxy/mod_proxy.h (revision 1850749)
> +++ modules/proxy/mod_proxy.h (working copy)
> @@ -472,7 +472,9 @@ struct proxy_worker {
> proxy_conn_pool *cp;/* Connection pool to use */
> proxy_worker_shared *s; /* Shared data */
> proxy_balancer *balancer; /* which balancer am I in? */
> +#if APR_HAS_THREADS
> apr_thread_mutex_t *tmutex; /* Thread lock for updating address cache */
> +#endif
> void*context; /* general purpose storage */
> ap_conf_vector_t *section_config; /* -section wherein defined */
> };
> @@ -528,8 +530,10 @@ struct proxy_balancer {
> proxy_hashes hash;
> apr_time_t wupdated;/* timestamp of last change to workers list
> */
> proxy_balancer_method *lbmethod;
> +#if APR_HAS_THREADS
> apr_global_mutex_t *gmutex; /* global lock for updating list of workers
> */
> apr_thread_mutex_t *tmutex; /* Thread lock for updating shm */
> +#endif
> proxy_server_conf *sconf;
> void*context;/* general purpose storage */
> proxy_balancer_shared *s;/* Shared data */
> Index: modules/proxy/mod_proxy_balancer.c
> ===================================================================
> --- modules/proxy/mod_proxy_balancer.c(revision 1850749)
> +++ modules/proxy/mod_proxy_balancer.c(working copy)
> @@ -346,6 +346,7 @@ static proxy_worker *find_best_worker(proxy_balanc
> proxy_worker *candidate = NULL;
> apr_status_t rv;
>
> +#if APR_HAS_THREADS
> if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
> ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01163)
>"%s: Lock failed for find_best_worker()",
> @@ -352,6 +353,7 @@ static proxy_worker *find_best_worker(proxy_balanc
>balancer->s->name);
> return NULL;
> }
> +#endif
>
> candidate = (*balancer->lbmethod->finder)(balancer, r);
>
> @@ -358,11 +360,13 @@ static proxy_worker *find_best_worker(proxy_balanc
> if (candidate)
> candidate->s->elected++;
>
> +#if APR_HAS_THREADS
> if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
> ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01164)
>"%s: Unlock failed for find_best_worker()",
>balancer->s->name);
> }
> +#endif
>
> if (candidate == NULL) {
> /* All the workers are in error state or disabled.
> @@ -492,11 +496,13 @@ static int proxy_balancer_pre_request(proxy_worker
> /* Step 2: Lock the LoadBalancer
> * XXX: perhaps we need the process lock here
> */
> +#if APR_HAS_THREADS
> if ((rv = PROXY_THREAD_LOCK(*balancer)) != APR_SUCCESS) {
> ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01166)
>"%s: Lock failed for pre_request",
> (*balancer)->s->name);
> return DECLINED;
> }
> +#endif
>
> /* Step 3: force recovery */
> force_recovery(*balancer, r->server);
> @@ -557,20 +563,24 @@ static int proxy_balancer_pre_request(proxy_worker
> ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01167)
>"%s: All workers are in error state for route
> (%s)",
>(*balancer)->s->name, route);
> +#if APR_HAS_THREADS
> if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
> ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01168)
>"%s: Unlock failed for pre_request",
>(*balancer)->s->name);
> }
> +#endif
> return HTTP_SERVICE_UNAVAILABLE;
> }
> }
>
> +#if APR_HAS_THREADS
> if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
> ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01169)
>"%s: Unlock failed for pre_request",
>(*balancer)->s->name);
> }
> +#endif
> if (!*worker) {
> runtime = find_best_worker(*balancer, r);
> if (!runtime) {
> @@ -644,6 +654,7 @@ static int pr