On Tue, Jan 08, 2019 at 03:46:48PM +0100, Stefan Sperling wrote:
> mod_proxy fails to compile when APR doesn't have thread support.
> I don't know if this is supposed to be a supported configuration,
> but this problem did not exist with HTTPD 2.2; it showed up in 2.4.
> 
> The patch below adds checks for APR_HAS_THREADS and passes test
> builds with both threaded and non-threaded APR from APR's trunk.
> Is this the right approach?
> 
> I also found that the http2 module won't build without thread support.
> I can simply omit http2 from my SVN dev builds, for now. But mod_proxy
> is required by mod_dav_svn for SVN's write-through proxy feature.

Any feedback on this? Should I just commit it if I don't hear anything?
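
For anyone skimming the diff below, the whole change follows one pattern: the mutex
struct members and the lock/unlock calls around them exist only when APR was built
with thread support, so both are wrapped in APR_HAS_THREADS. Here is a rough
standalone sketch of that pattern (my own illustration with made-up names such as
example_state/example_update, not code from the tree):

#include "apr.h"
#include "apr_errno.h"          /* apr_status_t */
#include "apr_pools.h"          /* apr_pool_t */
#if APR_HAS_THREADS
#include "apr_thread_mutex.h"
#endif

/* Hypothetical struct for illustration only; mod_proxy's real structs
 * (proxy_worker, proxy_balancer) live in mod_proxy.h. */
struct example_state {
#if APR_HAS_THREADS
    apr_thread_mutex_t *tmutex; /* only present in threaded builds */
#endif
    int counter;
};

static apr_status_t example_update(struct example_state *st, apr_pool_t *p)
{
    apr_status_t rv = APR_SUCCESS;

#if APR_HAS_THREADS
    if (st->tmutex == NULL) {
        rv = apr_thread_mutex_create(&st->tmutex, APR_THREAD_MUTEX_DEFAULT, p);
        if (rv != APR_SUCCESS)
            return rv;
    }
    if ((rv = apr_thread_mutex_lock(st->tmutex)) != APR_SUCCESS)
        return rv;
#endif

    /* In a non-threaded build each process has a single thread,
     * so skipping the lock entirely is safe. */
    st->counter++;

#if APR_HAS_THREADS
    rv = apr_thread_mutex_unlock(st->tmutex);
#endif
    return rv;
}

The patch applies that same guard to proxy_worker and proxy_balancer in mod_proxy.h
and to the corresponding call sites in mod_proxy_balancer.c, mod_proxy_ftp.c and
proxy_util.c.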

> Index: modules/proxy/mod_proxy.h
> ===================================================================
> --- modules/proxy/mod_proxy.h (revision 1850749)
> +++ modules/proxy/mod_proxy.h (working copy)
> @@ -472,7 +472,9 @@ struct proxy_worker {
>      proxy_conn_pool     *cp;    /* Connection pool to use */
>      proxy_worker_shared   *s;   /* Shared data */
>      proxy_balancer  *balancer;  /* which balancer am I in? */
> +#if APR_HAS_THREADS
>      apr_thread_mutex_t  *tmutex; /* Thread lock for updating address cache */
> +#endif
>      void            *context;   /* general purpose storage */
>      ap_conf_vector_t *section_config; /* <Proxy>-section wherein defined */
>  };
> @@ -528,8 +530,10 @@ struct proxy_balancer {
>      proxy_hashes hash;
>      apr_time_t      wupdated;    /* timestamp of last change to workers list */
>      proxy_balancer_method *lbmethod;
> +#if APR_HAS_THREADS
>      apr_global_mutex_t  *gmutex; /* global lock for updating list of workers */
>      apr_thread_mutex_t  *tmutex; /* Thread lock for updating shm */
> +#endif
>      proxy_server_conf *sconf;
>      void            *context;    /* general purpose storage */
>      proxy_balancer_shared *s;    /* Shared data */
> Index: modules/proxy/mod_proxy_balancer.c
> ===================================================================
> --- modules/proxy/mod_proxy_balancer.c        (revision 1850749)
> +++ modules/proxy/mod_proxy_balancer.c        (working copy)
> @@ -346,6 +346,7 @@ static proxy_worker *find_best_worker(proxy_balanc
>      proxy_worker *candidate = NULL;
>      apr_status_t rv;
>  
> +#if APR_HAS_THREADS
>      if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
>          ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01163)
>                        "%s: Lock failed for find_best_worker()",
> @@ -352,6 +353,7 @@ static proxy_worker *find_best_worker(proxy_balanc
>                        balancer->s->name);
>          return NULL;
>      }
> +#endif
>  
>      candidate = (*balancer->lbmethod->finder)(balancer, r);
>  
> @@ -358,11 +360,13 @@ static proxy_worker *find_best_worker(proxy_balanc
>      if (candidate)
>          candidate->s->elected++;
>  
> +#if APR_HAS_THREADS
>      if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
>          ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01164)
>                        "%s: Unlock failed for find_best_worker()",
>                        balancer->s->name);
>      }
> +#endif
>  
>      if (candidate == NULL) {
>          /* All the workers are in error state or disabled.
> @@ -492,11 +496,13 @@ static int proxy_balancer_pre_request(proxy_worker
>      /* Step 2: Lock the LoadBalancer
>       * XXX: perhaps we need the process lock here
>       */
> +#if APR_HAS_THREADS
>      if ((rv = PROXY_THREAD_LOCK(*balancer)) != APR_SUCCESS) {
>          ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01166)
>                        "%s: Lock failed for pre_request", (*balancer)->s->name);
>          return DECLINED;
>      }
> +#endif
>  
>      /* Step 3: force recovery */
>      force_recovery(*balancer, r->server);
> @@ -557,20 +563,24 @@ static int proxy_balancer_pre_request(proxy_worker
>              ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01167)
>                            "%s: All workers are in error state for route (%s)",
>                            (*balancer)->s->name, route);
> +#if APR_HAS_THREADS
>              if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
>                  ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01168)
>                                "%s: Unlock failed for pre_request",
>                                (*balancer)->s->name);
>              }
> +#endif
>              return HTTP_SERVICE_UNAVAILABLE;
>          }
>      }
>  
> +#if APR_HAS_THREADS
>      if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
>          ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01169)
>                        "%s: Unlock failed for pre_request",
>                        (*balancer)->s->name);
>      }
> +#endif
>      if (!*worker) {
>          runtime = find_best_worker(*balancer, r);
>          if (!runtime) {
> @@ -644,6 +654,7 @@ static int proxy_balancer_post_request(proxy_worke
>  
>      apr_status_t rv;
>  
> +#if APR_HAS_THREADS
>      if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
>          ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01173)
>                        "%s: Lock failed for post_request",
> @@ -650,6 +661,7 @@ static int proxy_balancer_post_request(proxy_worke
>                        balancer->s->name);
>          return HTTP_INTERNAL_SERVER_ERROR;
>      }
> +#endif
>  
>      if (!apr_is_empty_array(balancer->errstatuses)
>          && !(worker->s->status & PROXY_WORKER_IGNORE_ERRORS)) {
> @@ -681,11 +693,12 @@ static int proxy_balancer_post_request(proxy_worke
>          worker->s->error_time = apr_time_now();
>  
>      }
> -
> +#if APR_HAS_THREADS
>      if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
>          ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01175)
>                        "%s: Unlock failed for post_request", balancer->s->name);
>      }
> +#endif
>      ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01176)
>                    "proxy_balancer_post_request for (%s)", balancer->s->name);
>  
> @@ -715,7 +728,9 @@ static void recalc_factors(proxy_balancer *balance
>  
>  static apr_status_t lock_remove(void *data)
>  {
> +#if APR_HAS_THREADS
>      int i;
> +#endif
>      proxy_balancer *balancer;
>      server_rec *s = data;
>      void *sconf = s->module_config;
> @@ -722,6 +737,7 @@ static apr_status_t lock_remove(void *data)
>      proxy_server_conf *conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
>  
>      balancer = (proxy_balancer *)conf->balancers->elts;
> +#if APR_HAS_THREADS
>      for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
>          if (balancer->gmutex) {
>              apr_global_mutex_destroy(balancer->gmutex);
> @@ -728,6 +744,7 @@ static apr_status_t lock_remove(void *data)
>              balancer->gmutex = NULL;
>          }
>      }
> +#endif
>      return(0);
>  }
>  
> @@ -943,7 +960,7 @@ static int balancer_post_config(apr_pool_t *pconf,
>              PROXY_STRNCPY(balancer->s->sname, sname); /* We know this will succeed */
>  
>              balancer->max_workers = balancer->workers->nelts + balancer->growth;
> -
> +#if APR_HAS_THREADS
>              /* Create global mutex */
>              rv = ap_global_mutex_create(&(balancer->gmutex), NULL, balancer_mutex_type,
>                                          balancer->s->sname, s, pconf, 0);
> @@ -953,7 +970,7 @@ static int balancer_post_config(apr_pool_t *pconf,
>                               balancer->s->sname);
>                  return HTTP_INTERNAL_SERVER_ERROR;
>              }
> -
> +#endif
>              apr_pool_cleanup_register(pconf, (void *)s, lock_remove,
>                                        apr_pool_cleanup_null);
>  
> @@ -1133,17 +1150,21 @@ static int balancer_handler(request_rec *r)
>  
>      balancer = (proxy_balancer *)conf->balancers->elts;
>      for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
> +#if APR_HAS_THREADS
>          if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
>              ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01189)
>                            "%s: Lock failed for balancer_handler",
>                            balancer->s->name);
>          }
> +#endif
>          ap_proxy_sync_balancer(balancer, r->server, conf);
> +#if APR_HAS_THREADS
>          if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
>              ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01190)
>                            "%s: Unlock failed for balancer_handler",
>                            balancer->s->name);
>          }
> +#endif
>      }
>  
>      if (r->args && (r->method_number == M_GET)) {
> @@ -1356,11 +1377,13 @@ static int balancer_handler(request_rec *r)
>              proxy_worker *nworker;
>              nworker = ap_proxy_get_worker(r->pool, bsel, conf, val);
>              if (!nworker && storage->num_free_slots(bsel->wslot)) {
> +#if APR_HAS_THREADS
>                  if ((rv = PROXY_GLOBAL_LOCK(bsel)) != APR_SUCCESS) {
>                      ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01194)
>                                    "%s: Lock failed for adding worker",
>                                    bsel->s->name);
>                  }
> +#endif
>                  ret = ap_proxy_define_worker(conf->pool, &nworker, bsel, conf, val, 0);
>                  if (!ret) {
>                      unsigned int index;
> @@ -1369,41 +1392,49 @@ static int balancer_handler(request_rec *r)
>                      if ((rv = storage->grab(bsel->wslot, &index)) != APR_SUCCESS) {
>                          ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01195)
>                                        "worker slotmem_grab failed");
> +#if APR_HAS_THREADS
>                          if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
>                              ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01196)
>                                            "%s: Unlock failed for adding worker",
>                                            bsel->s->name);
>                          }
> +#endif
>                          return HTTP_BAD_REQUEST;
>                      }
>                      if ((rv = storage->dptr(bsel->wslot, index, (void *)&shm)) != APR_SUCCESS) {
>                          ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01197)
>                                        "worker slotmem_dptr failed");
> +#if APR_HAS_THREADS
>                          if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
>                              ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01198)
>                                            "%s: Unlock failed for adding worker",
>                                            bsel->s->name);
>                          }
> +#endif
>                          return HTTP_BAD_REQUEST;
>                      }
>                      if ((rv = ap_proxy_share_worker(nworker, shm, index)) != APR_SUCCESS) {
>                          ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01199)
>                                        "Cannot share worker");
> +#if APR_HAS_THREADS
>                          if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
>                              ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01200)
>                                            "%s: Unlock failed for adding worker",
>                                            bsel->s->name);
>                          }
> +#endif
>                          return HTTP_BAD_REQUEST;
>                      }
>                      if ((rv = ap_proxy_initialize_worker(nworker, r->server, conf->pool)) != APR_SUCCESS) {
>                          ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01201)
>                                        "Cannot init worker");
> +#if APR_HAS_THREADS
>                          if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
>                              ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01202)
>                                            "%s: Unlock failed for adding worker",
>                                            bsel->s->name);
>                          }
> +#endif
>                          return HTTP_BAD_REQUEST;
>                      }
>                      /* sync all timestamps */
> @@ -1414,14 +1445,18 @@ static int balancer_handler(request_rec *r)
>                              ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01207)
>                                    "%s: failed to add worker %s",
>                                    bsel->s->name, val);
> +#if APR_HAS_THREADS
>                      PROXY_GLOBAL_UNLOCK(bsel);
> +#endif
>                      return HTTP_BAD_REQUEST;
>                  }
> +#if APR_HAS_THREADS
>                  if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
>                      ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01203)
>                                    "%s: Unlock failed for adding worker",
>                                    bsel->s->name);
>                  }
> +#endif
>              } else {
>                  ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01207)
>                                    "%s: failed to add worker %s",
> Index: modules/proxy/mod_proxy_ftp.c
> ===================================================================
> --- modules/proxy/mod_proxy_ftp.c     (revision 1850749)
> +++ modules/proxy/mod_proxy_ftp.c     (working copy)
> @@ -1119,10 +1119,12 @@ static int proxy_ftp_handler(request_rec *r, proxy
>  
>      if (worker->s->is_address_reusable) {
>          if (!worker->cp->addr) {
> +#if APR_HAS_THREADS
>              if ((err = PROXY_THREAD_LOCK(worker->balancer)) != APR_SUCCESS) {
>                  ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(01037) "lock");
>                  return HTTP_INTERNAL_SERVER_ERROR;
>              }
> +#endif
>          }
>          connect_addr = worker->cp->addr;
>          address_pool = worker->cp->pool;
> @@ -1138,9 +1140,11 @@ static int proxy_ftp_handler(request_rec *r, proxy
>                                      address_pool);
>      if (worker->s->is_address_reusable && !worker->cp->addr) {
>          worker->cp->addr = connect_addr;
> +#if APR_HAS_THREADS
>          if ((uerr = PROXY_THREAD_UNLOCK(worker->balancer)) != APR_SUCCESS) {
>              ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(01038) "unlock");
>          }
> +#endif
>      }
>      /*
>       * get all the possible IP addresses for the destname and loop through
> Index: modules/proxy/proxy_util.c
> ===================================================================
> --- modules/proxy/proxy_util.c        (revision 1850749)
> +++ modules/proxy/proxy_util.c        (working copy)
> @@ -1167,8 +1167,10 @@ PROXY_DECLARE(char *) ap_proxy_define_balancer(apr
>      lbmethod = ap_lookup_provider(PROXY_LBMETHOD, "byrequests", "0");
>  
>      (*balancer)->workers = apr_array_make(p, 5, sizeof(proxy_worker *));
> +#if APR_HAS_THREADS
>      (*balancer)->gmutex = NULL;
>      (*balancer)->tmutex = NULL;
> +#endif
>      (*balancer)->lbmethod = lbmethod;
>  
>      if (do_malloc)
> @@ -1271,6 +1273,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_ba
>       * for each balancer we need to init the global
>       * mutex and then attach to the shared worker shm
>       */
> +#if APR_HAS_THREADS
>      if (!balancer->gmutex) {
>          ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s, APLOGNO(00919)
>                       "no mutex %s", balancer->s->name);
> @@ -1287,6 +1290,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_ba
>                       balancer->s->name);
>          return rv;
>      }
> +#endif
>  
>      /* now attach */
>      storage->attach(&(balancer->wslot), balancer->s->sname, &size, &num, p);
> @@ -1297,6 +1301,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_ba
>      if (balancer->lbmethod && balancer->lbmethod->reset)
>          balancer->lbmethod->reset(balancer, s);
>  
> +#if APR_HAS_THREADS
>      if (balancer->tmutex == NULL) {
>          rv = apr_thread_mutex_create(&(balancer->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
>          if (rv != APR_SUCCESS) {
> @@ -1305,6 +1310,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_ba
>              return rv;
>          }
>      }
> +#endif
>      return APR_SUCCESS;
>  }
>  
> @@ -2035,6 +2041,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_wo
>                       ap_proxy_worker_name(p, worker));
>          apr_global_mutex_lock(proxy_mutex);
>          /* Now init local worker data */
> +#if APR_HAS_THREADS
>          if (worker->tmutex == NULL) {
>              rv = apr_thread_mutex_create(&(worker->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
>              if (rv != APR_SUCCESS) {
> @@ -2044,6 +2051,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_wo
>                  return rv;
>              }
>          }
> +#endif
>          if (worker->cp == NULL)
>              init_conn_pool(p, worker);
>          if (worker->cp == NULL) {
> @@ -2535,10 +2543,12 @@ ap_proxy_determine_connection(apr_pool_t *p, reque
>               * we can reuse the address.
>               */
>              if (!worker->cp->addr) {
> +#if APR_HAS_THREADS
>                  if ((err = PROXY_THREAD_LOCK(worker)) != APR_SUCCESS) {
>                      ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(00945) "lock");
>                      return HTTP_INTERNAL_SERVER_ERROR;
>                  }
> +#endif
>  
>                  /*
>                   * Worker can have the single constant backend address.
> @@ -2551,9 +2561,11 @@ ap_proxy_determine_connection(apr_pool_t *p, reque
>                                              conn->port, 0,
>                                              worker->cp->pool);
>                  conn->addr = worker->cp->addr;
> +#if APR_HAS_THREADS
>                  if ((uerr = PROXY_THREAD_UNLOCK(worker)) != APR_SUCCESS) {
>                      ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(00946) "unlock");
>                  }
> +#endif
>              }
>              else {
>                  conn->addr = worker->cp->addr;
> @@ -3483,7 +3495,9 @@ PROXY_DECLARE(apr_status_t) ap_proxy_sync_balancer
>              (*runtime)->cp = NULL;
>              (*runtime)->balancer = b;
>              (*runtime)->s = shm;
> +#if APR_HAS_THREADS
>              (*runtime)->tmutex = NULL;
> +#endif
>              rv = ap_proxy_initialize_worker(*runtime, s, conf->pool);
>              if (rv != APR_SUCCESS) {
>                  ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(00966) "Cannot init worker");
