On Wednesday 22 February 2012, Stefan Fritsch wrote:
> On Wed, 22 Feb 2012, Rainer Jung wrote:
> > Looking at the server status on www.apache.org running 2.3.15, one
> > can see that about 50% of the async connections are in closing
> > state.
> > 
> > We created AsyncRequestWorkerFactor to control the amount of
> > overcommitment, in terms of connections relative to idle workers,
> > that we allow for each process. The formula is
> > 
> > Connections =
> >   ThreadsPerChild + (AsyncRequestWorkerFactor * idle_workers) =
> >   busy_workers + ((AsyncRequestWorkerFactor + 1) * idle_workers)
> > 
> > The default value of AsyncRequestWorkerFactor is 2, so by default
> > 
> > Connections = busy + 3 * idle.
> > 
> > Now if the situation on www.apache.org turns out to be typical,
> > 50% of connections are in closing state, but we don't expect
> > those to need a worker thread any more. So the effective
> > overcommitment would drop from a factor of 3 to something closer
> > to 1.5, which isn't much.
> > 
> > Would it be feasible to count only the number of non-closing
> > connections, i.e. allow new connections for a process as long as
> > 
> > Non_Closing_Connections <=
> >   ThreadsPerChild + (AsyncRequestWorkerFactor * idle_workers)
> > 
> > Comments?
> 
> That's certainly a possible improvement.

I wrote the attached patches last weekend. They should do it, but I 
haven't had a chance to test them properly and won't have time in 
the next 1-2 weeks. If someone wants to try them, please go ahead.

Cheers,
Stefan
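
To make the effect of the proposed change concrete, here is a minimal
standalone sketch of the accept gate before and after excluding lingering
connections. This is not the MPM code itself: the numbers are made up for
illustration, and the real check in event.c uses apr_atomic_read32() and
WORKER_FACTOR_SCALE fixed-point arithmetic.

/* accept_gate_sketch.c -- toy illustration only, not httpd code */
#include <stdio.h>

int main(void)
{
    int threads_per_child = 25;   /* ThreadsPerChild (example value) */
    int worker_factor     = 2;    /* AsyncRequestWorkerFactor default */
    int idlers            = 10;   /* currently idle worker threads */
    int connections       = 60;   /* open connections in this process */
    int lingering         = 30;   /* ~50% in lingering close, as on www.apache.org */

    int limit = threads_per_child + worker_factor * idlers;

    /* current behaviour: every open connection counts against the limit */
    printf("old check, stop accepting: %s\n",
           connections > limit ? "yes" : "no");

    /* proposed behaviour: lingering connections need no worker thread,
     * so they do not count against the limit */
    printf("new check, stop accepting: %s\n",
           connections - lingering > limit ? "yes" : "no");

    return 0;
}

With these example numbers the limit is 45, so the old check disables the
listener even though only 30 connections can still need a worker, while the
new check keeps accepting.
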
From e3b46b2e875a2658fda656c8ce9ed191928deaa5 Mon Sep 17 00:00:00 2001
From: Stefan Fritsch <[email protected]>
Date: Mon, 27 Feb 2012 22:49:15 +0100
Subject: [PATCH 2/2] don't count lingering conns against AsyncRequestWorkerFactor

---
 server/mpm/event/event.c |   17 ++++++++++-------
 1 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c
index e5e92aa..d5c5195 100644
--- a/server/mpm/event/event.c
+++ b/server/mpm/event/event.c
@@ -1631,9 +1631,11 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                                  "All workers busy, not accepting new conns"
                                  "in this process");
                 }
-                else if (apr_atomic_read32(&connection_count) > threads_per_child
-                         + ap_queue_info_get_idlers(worker_queue_info) *
-                           worker_factor / WORKER_FACTOR_SCALE)
+                else if (  (int)apr_atomic_read32(&connection_count)
+                           - (int)apr_atomic_read32(&lingering_count)
+                         > threads_per_child
+                           + ap_queue_info_get_idlers(worker_queue_info) *
+                             worker_factor / WORKER_FACTOR_SCALE)
                 {
                     if (!listeners_disabled)
                         disable_listensocks(process_slot);
@@ -1767,10 +1769,11 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
             ps->suspended = apr_atomic_read32(&suspended_count);
             ps->lingering_close = apr_atomic_read32(&lingering_count);
         }
-        if (listeners_disabled && !workers_were_busy &&
-            (int)apr_atomic_read32(&connection_count) <
-            ((int)ap_queue_info_get_idlers(worker_queue_info) - 1) *
-            worker_factor / WORKER_FACTOR_SCALE + threads_per_child)
+        if (listeners_disabled && !workers_were_busy
+            && (int)apr_atomic_read32(&connection_count)
+               - (int)apr_atomic_read32(&lingering_count)
+               < ((int)ap_queue_info_get_idlers(worker_queue_info) - 1)
+                 * worker_factor / WORKER_FACTOR_SCALE + threads_per_child)
         {
             listeners_disabled = 0;
             enable_listensocks(process_slot);
-- 
1.7.9.1
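
A side note on the (int) casts above (my reading of the patch, not something
it states): connection_count and lingering_count are apr_uint32_t and are
updated independently, so a race could briefly make lingering_count exceed
connection_count. An unsigned difference would then wrap to a huge value and
keep the listener throttled; casting both operands to int keeps the
difference signed. A tiny standalone demonstration:

#include <stdio.h>

int main(void)
{
    /* stand-ins for the apr_uint32_t counters */
    unsigned int connection_count = 2, lingering_count = 3;

    /* unsigned subtraction wraps around to a huge value */
    printf("unsigned: %u\n", connection_count - lingering_count);

    /* the casts used in the patch keep the result signed (-1 here) */
    printf("signed:   %d\n",
           (int)connection_count - (int)lingering_count);
    return 0;
}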

From 2eaa7b714155253025701304890c0da2eb9f2902 Mon Sep 17 00:00:00 2001
From: Stefan Fritsch <[email protected]>
Date: Sun, 26 Feb 2012 22:32:25 +0100
Subject: [PATCH 1/2] add counters for clogged, lingering, and suspended
 connections

---
 server/mpm/event/event.c |   50 ++++++++++++++++++++++++++++++++++++---------
 1 files changed, 40 insertions(+), 10 deletions(-)

diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c
index 20b15e7..e5e92aa 100644
--- a/server/mpm/event/event.c
+++ b/server/mpm/event/event.c
@@ -177,7 +177,10 @@ static int start_thread_may_exit = 0;
 static int listener_may_exit = 0;
 static int requests_this_child;
 static int num_listensocks = 0;
-static apr_uint32_t connection_count = 0;
+static apr_uint32_t connection_count = 0; /* Number of open connections */
+static apr_uint32_t lingering_count = 0;  /* Number of connections in lingering close */
+static apr_uint32_t suspended_count = 0;  /* Number of suspended connections */
+static apr_uint32_t clogged_count = 0;    /* Number of threads processing ssl conns */
 static int resource_shortage = 0;
 static fd_queue_t *worker_queue;
 static fd_queue_info_t *worker_queue_info;
@@ -389,8 +392,12 @@ static void enable_listensocks(int process_slot)
     int i;
     ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00457)
                  "Accepting new connections again: "
-                 "%u active conns, %u idle workers",
+                 "%u active conns (%u lingering/%u clogged/%u suspended), "
+                 "%u idle workers",
                  apr_atomic_read32(&connection_count),
+                 apr_atomic_read32(&lingering_count),
+                 apr_atomic_read32(&clogged_count),
+                 apr_atomic_read32(&suspended_count),
                  ap_queue_info_get_idlers(worker_queue_info));
     for (i = 0; i < num_listensocks; i++)
         apr_pollset_add(event_pollset, &listener_pollfd[i]);
@@ -607,7 +614,20 @@ static int child_fatal;
 static int volatile shutdown_pending;
 static int volatile restart_pending;
 
-static apr_status_t decrement_connection_count(void *dummy) {
+static apr_status_t decrement_connection_count(void *cs_)
+{
+    event_conn_state_t *cs = cs_;
+    switch (cs->pub.state) {
+        case CONN_STATE_LINGER_NORMAL:
+        case CONN_STATE_LINGER_SHORT:
+            apr_atomic_dec32(&lingering_count);
+            break;
+        case CONN_STATE_SUSPENDED:
+            apr_atomic_dec32(&suspended_count);
+            break;
+        default:
+            break;
+    }
     apr_atomic_dec32(&connection_count);
     return APR_SUCCESS;
 }
@@ -785,6 +805,7 @@ static int start_lingering_close_common(event_conn_state_t *cs)
         q = &linger_q;
         cs->pub.state = CONN_STATE_LINGER_NORMAL;
     }
+    apr_atomic_inc32(&lingering_count);
     apr_thread_mutex_lock(timeout_mutex);
     TO_QUEUE_APPEND(*q, cs);
     cs->pfd.reqevents = APR_POLLIN | APR_POLLHUP | APR_POLLERR;
@@ -899,7 +920,8 @@ static int process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * sock
             return 1;
         }
         apr_atomic_inc32(&connection_count);
-        apr_pool_cleanup_register(c->pool, NULL, decrement_connection_count, apr_pool_cleanup_null);
+        apr_pool_cleanup_register(c->pool, cs, decrement_connection_count,
+                                  apr_pool_cleanup_null);
         c->current_thread = thd;
         cs->c = c;
         c->cs = &(cs->pub);
@@ -948,10 +970,12 @@ static int process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * sock
          * like mod_ssl, lets just do the normal read from input filters,
          * like the Worker MPM does.
          */
+        apr_atomic_inc32(&clogged_count);
         ap_run_process_connection(c);
         if (cs->pub.state != CONN_STATE_SUSPENDED) {
             cs->pub.state = CONN_STATE_LINGER;
         }
+        apr_atomic_dec32(&clogged_count);
     }
 
 read_request:
@@ -1039,6 +1063,9 @@ read_request:
             AP_DEBUG_ASSERT(rc == APR_SUCCESS);
         }
     }
+    else if (cs->pub.state == CONN_STATE_SUSPENDED) {
+        apr_atomic_inc32(&suspended_count);
+    }
     /*
      * Prevent this connection from writing to our connection state after it
      * is no longer associated with this thread. This would happen if the EOR
@@ -1458,11 +1485,14 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                 last_log = now;
                 apr_thread_mutex_lock(timeout_mutex);
                 ap_log_error(APLOG_MARK, APLOG_TRACE6, 0, ap_server_conf,
-                             "connections: %d (write-completion: %d "
-                             "keep-alive: %d lingering: %d)",
-                             connection_count, write_completion_q.count,
+                             "connections: %u (clogged: %u write-completion: %d "
+                             "keep-alive: %d lingering: %d suspended: %u)",
+                             apr_atomic_read32(&connection_count),
+                             apr_atomic_read32(&clogged_count),
+                             write_completion_q.count,
                              keepalive_q.count,
-                             linger_q.count + short_linger_q.count);
+                             apr_atomic_read32(&lingering_count),
+                             apr_atomic_read32(&suspended_count));
                 apr_thread_mutex_unlock(timeout_mutex);
             }
         }
@@ -1730,12 +1760,12 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
 
             ps = ap_get_scoreboard_process(process_slot);
             ps->write_completion = write_completion_q.count;
-            ps->lingering_close = linger_q.count + short_linger_q.count;
             ps->keep_alive = keepalive_q.count;
             apr_thread_mutex_unlock(timeout_mutex);
 
             ps->connections = apr_atomic_read32(&connection_count);
-            /* XXX: should count CONN_STATE_SUSPENDED and set ps->suspended */
+            ps->suspended = apr_atomic_read32(&suspended_count);
+            ps->lingering_close = apr_atomic_read32(&lingering_count);
         }
         if (listeners_disabled && !workers_were_busy &&
             (int)apr_atomic_read32(&connection_count) <
-- 
1.7.9.1
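
For reference, patch 1/2's change from passing NULL to passing cs relies on
the standard apr_pool_cleanup_register() pattern: the data pointer given at
registration is handed back to the cleanup when the connection's pool is
destroyed, which is what lets decrement_connection_count() look at
cs->pub.state and pick the right counter. A toy example of the pattern (not
httpd code):

#include <stdio.h>
#include <apr_general.h>
#include <apr_pools.h>

static apr_status_t my_cleanup(void *data)
{
    /* runs when the pool is destroyed, with the pointer registered below */
    printf("cleanup sees: %s\n", (const char *)data);
    return APR_SUCCESS;
}

int main(void)
{
    apr_pool_t *pool;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    /* register with a data pointer, as the patch now does with cs */
    apr_pool_cleanup_register(pool, "connection state", my_cleanup,
                              apr_pool_cleanup_null);

    apr_pool_destroy(pool);   /* my_cleanup runs here */
    apr_terminate();
    return 0;
}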
