Hello,
First sorry if the terminology in this message is a bit confusing, but
I'll have to talk about apr_thread (threads), apr_thread's pool (the
pool associated with / assigned to a thread for its maintenance and
thread data), and apr_threadpool (the pool/reserve of threads to
schedule tasks).
Also it's a bit long so TL;DR:
apr_threadpool makes use of detached threads and it's crashy (and/or
deadlocky with APR_POOL_DEBUG).
You were warned ;)
So with current trunk, when compiled with APR_POOL_DEBUG, the test
suite deadlocks in testpass (the only user of apr_threadpool in
tests), see stacktrace in attached "deadlock-gdb.txt".
Without APR_POOL_DEBUG it crashes if you insist a bit, or add a small
delay to apr_threadpool's thread_pool_cleanup(), stacktrace omitted :p
While the deadlock happens with the latest trunk only (i.e. not before
r1883800 where I added unconditional parent locking), the crash
existed long before.
I tried to fix it by moving the destruction of the apr_thread's pool
from apr_thread_exit() (and from dummy_worker() when apr_thread_exit()
is not called) to apr_thread_join(), which only applies to
non-detached threads — which seems sensible. See attached
"thread_cleanup-unix.diff" (these
are the unix changes only to keep it simple, though I have the full
patch for all OSes).
Now testpass crashes because apr_threadpool detaches its threads when
cleaning up. Hmm?!?! How so?
If we want detached threads we need an unmanaged pool for them, either
at apr_thread_create() time if the passed in apr_threadattr_t tells
so, or switch from a normal pool to an unmanaged pool on
apr_thread_detach() if it's called later (which of course
apr_threadpool does..).
Wait! Detached threads are evil right?
So now I go change apr_threadpool so that it does not detach threads
and joins all of them on cleanup, see attached
"threadpool_no_detach.diff".
All tests pass now.
Are these changes acceptable? WDYT?
Regards,
Yann.
$ (cd test && gdb ./testall)
<snip>
(gdb) run testpass
Starting program: /home/yle/src/apache/asf/apr/trunk.ro/test/testall testpass
<snip>
testpass : |
[New Thread 0x7ffff7cb4700 (LWP 2898012)]
[New Thread 0x7ffff74b3700 (LWP 2898013)]
[New Thread 0x7ffff6cb2700 (LWP 2898014)]
[New Thread 0x7ffff64b1700 (LWP 2898015)]
[New Thread 0x7ffff5cb0700 (LWP 2898016)]
[New Thread 0x7ffff54af700 (LWP 2898017)]
[New Thread 0x7ffff4cae700 (LWP 2898018)]
[New Thread 0x7ffff44ad700 (LWP 2898019)]
[New Thread 0x7ffff3cac700 (LWP 2898020)]
[New Thread 0x7ffff34ab700 (LWP 2898021)]
|
^C
Thread 1 "testall" received signal SIGINT, Interrupt.
<Here I stopped the execution not making progress>
(gdb) thread apply all bt
Thread 11 (Thread 0x7ffff34ab700 (LWP 2898021) "testall"):
#0 __lll_lock_wait (futex=futex@entry=0x555555628348, private=0) at
lowlevellock.c:52
#1 0x00007ffff7f0e8d1 in __GI___pthread_mutex_lock (mutex=0x555555628348) at
../nptl/pthread_mutex_lock.c:115
#2 0x00007ffff7f9d010 in apr_thread_mutex_lock (mutex=0x555555628340) at
locks/unix/thread_mutex.c:156
#3 0x00007ffff7f9dd03 in parent_lock (pool=0x55555562be20) at
memory/unix/apr_pools.c:1491
#4 0x00007ffff7f9e48e in apr_pool_destroy_debug (pool=0x55555562be20,
file_line=0x7ffff7fb6718 "threadproc/unix/thread.c:211") at
memory/unix/apr_pools.c:2005
#5 0x00007ffff7fada1a in apr_thread_exit (thd=0x55555562bdd0, retval=0) at
threadproc/unix/thread.c:211
#6 0x00007ffff7f90fb5 in thread_pool_func (t=0x55555562bdd0,
param=0x55555562a350) at util-misc/apr_thread_pool.c:327
#7 0x00007ffff7fad85a in dummy_worker (opaque=0x55555562bdd0) at
threadproc/unix/thread.c:145
#8 0x00007ffff7f0bea7 in start_thread (arg=<optimized out>) at
pthread_create.c:477
#9 0x00007ffff7e3bd4f in clone () at
../sysdeps/unix/sysv/linux/x86_64/clone.S:95
<... Same for threads 10..2 ...>
Thread 2 (Thread 0x7ffff7cb4700 (LWP 2898012) "testall"):
#0 __lll_lock_wait (futex=futex@entry=0x555555628348, private=0) at
lowlevellock.c:52
#1 0x00007ffff7f0e8d1 in __GI___pthread_mutex_lock (mutex=0x555555628348) at
../nptl/pthread_mutex_lock.c:115
#2 0x00007ffff7f9d010 in apr_thread_mutex_lock (mutex=0x555555628340) at
locks/unix/thread_mutex.c:156
#3 0x00007ffff7f9dd03 in parent_lock (pool=0x55555562ab00) at
memory/unix/apr_pools.c:1491
#4 0x00007ffff7f9e48e in apr_pool_destroy_debug (pool=0x55555562ab00,
file_line=0x7ffff7fb6718 "threadproc/unix/thread.c:211") at
memory/unix/apr_pools.c:2005
#5 0x00007ffff7fada1a in apr_thread_exit (thd=0x55555562aab0, retval=0) at
threadproc/unix/thread.c:211
#6 0x00007ffff7f90fb5 in thread_pool_func (t=0x55555562aab0,
param=0x55555562a350) at util-misc/apr_thread_pool.c:327
#7 0x00007ffff7fad85a in dummy_worker (opaque=0x55555562aab0) at
threadproc/unix/thread.c:145
#8 0x00007ffff7f0bea7 in start_thread (arg=<optimized out>) at
pthread_create.c:477
#9 0x00007ffff7e3bd4f in clone () at
../sysdeps/unix/sysv/linux/x86_64/clone.S:95
Thread 1 (Thread 0x7ffff7cb5780 (LWP 2898008) "testall"):
#0 __pthread_clockjoin_ex (threadid=140737350682368,
thread_return=0x7fffffffd890, clockid=<optimized out>, abstime=<optimized out>,
block=<optimized out>) at pthread_join_common.c:145
#1 0x00007ffff7fada4e in apr_thread_join (retval=0x7fffffffd8cc,
thd=0x55555562aab0) at threadproc/unix/thread.c:221
#2 0x00007ffff7f92169 in trim_idle_threads (me=0x55555562a350, cnt=4) at
util-misc/apr_thread_pool.c:852
#3 0x00007ffff7f9228a in apr_thread_pool_idle_max_set (me=0x55555562a350,
cnt=0) at util-misc/apr_thread_pool.c:879
#4 0x00007ffff7f90fe8 in thread_pool_cleanup (me=0x55555562a350) at
util-misc/apr_thread_pool.c:336
#5 0x00007ffff7f9f42e in run_cleanups (cref=0x55555562a4c0) at
memory/unix/apr_pools.c:2687
#6 0x00007ffff7f9e19a in pool_clear_debug (pool=0x55555562a430,
file_line=0x7ffff7fb4a28 "util-misc/apr_thread_pool.c:399") at
memory/unix/apr_pools.c:1865
#7 0x00007ffff7f9e3d7 in pool_destroy_debug (pool=0x55555562a430,
file_line=0x7ffff7fb4a28 "util-misc/apr_thread_pool.c:399") at
memory/unix/apr_pools.c:1963
#8 0x00007ffff7f9e4a5 in apr_pool_destroy_debug (pool=0x55555562a430,
file_line=0x7ffff7fb4a28 "util-misc/apr_thread_pool.c:399") at
memory/unix/apr_pools.c:2012
#9 0x00007ffff7f911f4 in apr_thread_pool_destroy (me=0x55555562a350) at
util-misc/apr_thread_pool.c:399
#10 0x000055555558e097 in test_threadsafe (tc=0x7fffffffda70, data=0x0) at
testpass.c:116
#11 0x00005555555648cd in abts_run_test (ts=0x555555628990, f=0x55555558dfeb
<test_threadsafe>, value=0x0) at abts.c:177
#12 0x000055555558e426 in testpass (suite=0x555555628990) at testpass.c:206
#13 0x000055555556551b in main (argc=2, argv=0x7fffffffdbe8) at abts.c:482
(gdb) thread 1
[Switching to thread 1 (Thread 0x7ffff7cb5780 (LWP 2898008))]
<snip>
(gdb) frame 6
#6 0x00007ffff7f9e19a in pool_clear_debug (pool=0x55555562a430,
file_line=0x7ffff7fb4a28 "util-misc/apr_thread_pool.c:399") at
memory/unix/apr_pools.c:1865
1865 run_cleanups(&pool->pre_cleanups);
(gdb) p *pool
$1 = {parent = 0x555555628880, child = 0x55555562be20, sibling =
0x555555629c50, ref = 0x555555628888, cleanups = 0x55555562a9b0, free_cleanups
= 0x0, allocator = 0x0, subprocesses = 0x0, abort_fn = 0x0, user_data = 0x0,
tag = 0x7ffff7fb4a08 "util-misc/apr_thread_pool.c:363", joined = 0x0, nodes =
0x55555562a520, file_line = 0x7ffff7fb4a08 "util-misc/apr_thread_pool.c:363",
creation_flags = 0, stat_alloc = 61, stat_total_alloc = 61, stat_clear = 0,
owner = 140737350686592, mutex = 0x555555628340, pre_cleanups = 0x0}
<So Thread 1 is joining the apr_threadpool's Threads 11..2>
(gdb) thread 2
[Switching to thread 2 (Thread 0x7ffff7cb4700 (LWP 2898012))]
<snip>
(gdb) frame 4
#3 0x00007ffff7f9e48e in apr_pool_destroy_debug (pool=0x55555562ab00,
file_line=0x7ffff7fb6718 "threadproc/unix/thread.c:211") at
memory/unix/apr_pools.c:2005
2005 mutex = parent_lock(pool);
(gdb) p *pool
$4 = {parent = 0x55555562a430, child = 0x0, sibling = 0x0, ref =
0x55555562ad30, cleanups = 0x0, free_cleanups = 0x0, allocator = 0x0,
subprocesses = 0x0, abort_fn = 0x0, user_data = 0x0, tag = 0x7ffff7fb66fb
"threadproc/unix/thread.c:179", joined = 0x0, nodes = 0x0, file_line =
0x7ffff7fb66fb "threadproc/unix/thread.c:179", creation_flags = 0, stat_alloc =
0, stat_total_alloc = 0, stat_clear = 0, owner = 140737350686592, mutex =
0x555555628340, pre_cleanups = 0x0}
(gdb) p *pool->parent
$4 = {parent = 0x555555628880, child = 0x55555562be20, sibling =
0x555555629c50, ref = 0x555555628888, cleanups = 0x55555562a9b0, free_cleanups
= 0x0, allocator = 0x0, subprocesses = 0x0, abort_fn = 0x0, user_data = 0x0,
tag = 0x7ffff7fb4a08 "util-misc/apr_thread_pool.c:363", joined = 0x0, nodes =
0x55555562a520, file_line = 0x7ffff7fb4a08 "util-misc/apr_thread_pool.c:363",
creation_flags = 0, stat_alloc = 61, stat_total_alloc = 61, stat_clear = 0,
owner = 140737350686592, mutex = 0x555555628340, pre_cleanups = 0x0}
<And Threads 11..2 are destroying their apr_thread's pool from
apr_thread_exit(), which requires locking the same pool as Thread 1>
Index: include/arch/unix/apr_arch_threadproc.h
===================================================================
--- include/arch/unix/apr_arch_threadproc.h (revision 1883836)
+++ include/arch/unix/apr_arch_threadproc.h (working copy)
@@ -59,6 +59,7 @@ struct apr_thread_t {
void *data;
apr_thread_start_t func;
apr_status_t exitval;
+ int detached;
};
struct apr_threadattr_t {
Index: threadproc/unix/thread.c
===================================================================
--- threadproc/unix/thread.c (revision 1883836)
+++ threadproc/unix/thread.c (working copy)
@@ -143,7 +143,9 @@ static void *dummy_worker(void *opaque)
apr_pool_owner_set(thread->pool, 0);
ret = thread->func(thread, thread->data);
- apr_pool_destroy(thread->pool);
+ if (thread->detached && thread->pool) {
+ apr_pool_destroy(thread->pool);
+ }
return ret;
}
@@ -170,6 +172,7 @@ APR_DECLARE(apr_status_t) apr_thread_create(apr_th
(*new)->data = data;
(*new)->func = func;
+ (*new)->detached = (attr && apr_threadattr_detach_get(attr) == APR_DETACH);
if (attr)
temp = &attr->attr;
@@ -208,7 +211,9 @@ APR_DECLARE(void) apr_thread_exit(apr_thread_t *th
apr_status_t retval)
{
thd->exitval = retval;
- apr_pool_destroy(thd->pool);
+ if (thd->detached && thd->pool) {
+ apr_pool_destroy(thd->pool);
+ }
pthread_exit(NULL);
}
@@ -218,8 +223,13 @@ APR_DECLARE(apr_status_t) apr_thread_join(apr_stat
apr_status_t stat;
apr_status_t *thread_stat;
+ if (thd->detached) {
+ return APR_EINVAL;
+ }
+
if ((stat = pthread_join(*thd->td,(void *)&thread_stat)) == 0) {
*retval = thd->exitval;
+ apr_pool_destroy(thd->pool);
return APR_SUCCESS;
}
else {
@@ -240,6 +250,7 @@ APR_DECLARE(apr_status_t) apr_thread_detach(apr_th
#else
if ((stat = pthread_detach(*thd->td)) == 0) {
#endif
+ thd->detached = 1;
return APR_SUCCESS;
}
Index: util-misc/apr_thread_pool.c
===================================================================
--- util-misc/apr_thread_pool.c (revision 1883836)
+++ util-misc/apr_thread_pool.c (working copy)
@@ -71,9 +71,13 @@ struct apr_thread_pool
struct apr_thread_pool_tasks *scheduled_tasks;
struct apr_thread_list *busy_thds;
struct apr_thread_list *idle_thds;
+ struct apr_thread_list *dead_thds;
+ apr_thread_cond_t *more_work;
+ apr_thread_cond_t *work_done;
+ apr_thread_cond_t *all_done;
apr_thread_mutex_t *lock;
- apr_thread_cond_t *cond;
volatile int terminated;
+ int waiting_work_done;
struct apr_thread_pool_tasks *recycled_tasks;
struct apr_thread_list *recycled_thds;
apr_thread_pool_task_t *task_idx[TASK_PRIORITY_SEGS];
@@ -94,11 +98,24 @@ static apr_status_t thread_pool_construct(apr_thre
if (APR_SUCCESS != rv) {
return rv;
}
- rv = apr_thread_cond_create(&me->cond, me->pool);
+ rv = apr_thread_cond_create(&me->more_work, me->pool);
if (APR_SUCCESS != rv) {
apr_thread_mutex_destroy(me->lock);
return rv;
}
+ rv = apr_thread_cond_create(&me->work_done, me->pool);
+ if (APR_SUCCESS != rv) {
+ apr_thread_cond_destroy(me->more_work);
+ apr_thread_mutex_destroy(me->lock);
+ return rv;
+ }
+ rv = apr_thread_cond_create(&me->all_done, me->pool);
+ if (APR_SUCCESS != rv) {
+ apr_thread_cond_destroy(me->work_done);
+ apr_thread_cond_destroy(me->more_work);
+ apr_thread_mutex_destroy(me->lock);
+ return rv;
+ }
me->tasks = apr_palloc(me->pool, sizeof(*me->tasks));
if (!me->tasks) {
goto CATCH_ENOMEM;
@@ -124,6 +141,11 @@ static apr_status_t thread_pool_construct(apr_thre
goto CATCH_ENOMEM;
}
APR_RING_INIT(me->idle_thds, apr_thread_list_elt, link);
+ me->dead_thds = apr_palloc(me->pool, sizeof(*me->dead_thds));
+ if (!me->dead_thds) {
+ goto CATCH_ENOMEM;
+ }
+ APR_RING_INIT(me->dead_thds, apr_thread_list_elt, link);
me->recycled_thds = apr_palloc(me->pool, sizeof(*me->recycled_thds));
if (!me->recycled_thds) {
goto CATCH_ENOMEM;
@@ -139,8 +161,10 @@ static apr_status_t thread_pool_construct(apr_thre
goto FINAL_EXIT;
CATCH_ENOMEM:
rv = APR_ENOMEM;
+ apr_thread_cond_destroy(me->all_done);
+ apr_thread_cond_destroy(me->work_done);
+ apr_thread_cond_destroy(me->more_work);
apr_thread_mutex_destroy(me->lock);
- apr_thread_cond_destroy(me->cond);
FINAL_EXIT:
return rv;
}
@@ -231,9 +255,9 @@ static struct apr_thread_list_elt *elt_new(apr_thr
* The worker thread function. Take a task from the queue and perform it if
* there is any. Otherwise, put itself into the idle thread list and waiting
* for signal to wake up.
- * The thread terminate directly by detach and exit when it is asked to stop
- * after finishing a task. Otherwise, the thread should be in idle thread list
- * and should be joined.
+ * The thread terminates directly and exits when it is asked to stop after
+ * finishing a task. The thread should be in the idle thread list and it
+ * should be joined.
*/
static void *APR_THREAD_FUNC thread_pool_func(apr_thread_t * t, void *param)
{
@@ -270,6 +294,11 @@ static void *APR_THREAD_FUNC thread_pool_func(apr_
APR_RING_INSERT_TAIL(me->recycled_tasks, task,
apr_thread_pool_task, link);
elt->current_owner = NULL;
+ if (me->waiting_work_done) {
+ apr_thread_cond_signal(me->work_done);
+ apr_thread_mutex_unlock(me->lock);
+ apr_thread_mutex_lock(me->lock);
+ }
if (TH_STOP == elt->state) {
break;
}
@@ -276,23 +305,18 @@ static void *APR_THREAD_FUNC thread_pool_func(apr_
task = pop_task(me);
}
assert(NULL == elt->current_owner);
- if (TH_STOP != elt->state)
- APR_RING_REMOVE(elt, link);
+ APR_RING_REMOVE(elt, link);
- /* Test if a busy thread been asked to stop, which is not joinable */
+ /* Test if a busy thread been asked to stop */
if ((me->idle_cnt >= me->idle_max
&& !(me->scheduled_task_cnt && 0 >= me->idle_max)
&& !me->idle_wait)
|| me->terminated || elt->state != TH_RUN) {
- --me->thd_cnt;
if ((TH_PROBATION == elt->state) && me->idle_wait)
++me->thd_timed_out;
- APR_RING_INSERT_TAIL(me->recycled_thds, elt,
+ APR_RING_INSERT_TAIL(me->dead_thds, elt,
apr_thread_list_elt, link);
- apr_thread_mutex_unlock(me->lock);
- apr_thread_detach(t);
- apr_thread_exit(t, APR_SUCCESS);
- return NULL; /* should not be here, safe net */
+ break;
}
/* busy thread become idle */
@@ -314,32 +338,56 @@ static void *APR_THREAD_FUNC thread_pool_func(apr_
wait = -1;
if (wait >= 0) {
- apr_thread_cond_timedwait(me->cond, me->lock, wait);
+ apr_thread_cond_timedwait(me->more_work, me->lock, wait);
}
else {
- apr_thread_cond_wait(me->cond, me->lock);
+ apr_thread_cond_wait(me->more_work, me->lock);
}
}
/* idle thread been asked to stop, will be joined */
- --me->thd_cnt;
+ if (--me->thd_cnt == 0 && me->terminated) {
+ apr_thread_cond_signal(me->all_done);
+ }
apr_thread_mutex_unlock(me->lock);
apr_thread_exit(t, APR_SUCCESS);
return NULL; /* should not be here, safe net */
}
-static apr_status_t thread_pool_cleanup(void *me)
+/* Must be locked by the caller */
+static void join_deads(apr_thread_pool_t *me)
{
- apr_thread_pool_t *_myself = me;
+ struct apr_thread_list_elt *elt;
+ apr_status_t rv;
- _myself->terminated = 1;
- apr_thread_pool_idle_max_set(_myself, 0);
- while (_myself->thd_cnt) {
- apr_sleep(20 * 1000); /* spin lock with 20 ms */
+ while (!APR_RING_EMPTY(me->dead_thds, apr_thread_list_elt, link)) {
+ elt = APR_RING_FIRST(me->dead_thds);
+ apr_thread_join(&rv, elt->thd);
+
+ APR_RING_REMOVE(elt, link);
+ APR_RING_INSERT_TAIL(me->recycled_thds, elt, apr_thread_list_elt,
+ link);
}
- apr_pool_owner_set(_myself->pool, 0);
- apr_thread_mutex_destroy(_myself->lock);
- apr_thread_cond_destroy(_myself->cond);
+}
+
+static apr_status_t thread_pool_cleanup(void *data)
+{
+ apr_thread_pool_t *me = data;
+
+ me->terminated = 1;
+ apr_thread_pool_tasks_cancel(me, NULL);
+ apr_thread_pool_thread_max_set(me, 0);
+ apr_thread_pool_idle_max_set(me, 0);
+
+ apr_thread_mutex_lock(me->lock);
+ if (me->thd_cnt) {
+ apr_thread_cond_wait(me->all_done, me->lock);
+ }
+ /* All threads should be dead now, join them */
+ apr_pool_owner_set(me->pool, 0);
+ join_deads(me);
+ apr_thread_mutex_unlock(me->lock);
+
return APR_SUCCESS;
}
@@ -488,6 +536,7 @@ static apr_status_t schedule_task(apr_thread_pool_
apr_thread_pool_task_t *t_loc;
apr_thread_t *thd;
apr_status_t rv = APR_SUCCESS;
+
apr_thread_mutex_lock(me->lock);
apr_pool_owner_set(me->pool, 0);
@@ -525,8 +574,9 @@ static apr_status_t schedule_task(apr_thread_pool_
me->thd_high = me->thd_cnt;
}
}
- apr_thread_cond_signal(me->cond);
+ apr_thread_cond_signal(me->more_work);
apr_thread_mutex_unlock(me->lock);
+
return rv;
}
@@ -580,7 +630,7 @@ static apr_status_t add_task(apr_thread_pool_t *me
}
}
- apr_thread_cond_signal(me->cond);
+ apr_thread_cond_signal(me->more_work);
apr_thread_mutex_unlock(me->lock);
return rv;
@@ -625,7 +675,7 @@ static apr_status_t remove_scheduled_tasks(apr_thr
link)) {
next = APR_RING_NEXT(t_loc, link);
/* if this is the owner remove it */
- if (t_loc->owner == owner) {
+ if (!owner || t_loc->owner == owner) {
--me->scheduled_task_cnt;
APR_RING_REMOVE(t_loc, link);
}
@@ -643,7 +693,7 @@ static apr_status_t remove_tasks(apr_thread_pool_t
t_loc = APR_RING_FIRST(me->tasks);
while (t_loc != APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link)) {
next = APR_RING_NEXT(t_loc, link);
- if (t_loc->owner == owner) {
+ if (!owner || t_loc->owner == owner) {
--me->task_cnt;
seg = TASK_PRIORITY_SEG(t_loc);
if (t_loc == me->task_idx[seg]) {
@@ -671,7 +721,7 @@ static void wait_on_busy_threads(apr_thread_pool_t
apr_thread_mutex_lock(me->lock);
elt = APR_RING_FIRST(me->busy_thds);
while (elt != APR_RING_SENTINEL(me->busy_thds, apr_thread_list_elt, link)) {
- if (elt->current_owner != owner) {
+ if (!elt->current_owner || (owner && elt->current_owner != owner)) {
elt = APR_RING_NEXT(elt, link);
continue;
}
@@ -685,11 +735,13 @@ static void wait_on_busy_threads(apr_thread_pool_t
assert(!apr_os_thread_equal(apr_os_thread_current(), *os_thread));
#endif
#endif
- while (elt->current_owner == owner) {
- apr_thread_mutex_unlock(me->lock);
- apr_sleep(200 * 1000);
- apr_thread_mutex_lock(me->lock);
- }
+ do {
+ me->waiting_work_done = 1;
+ apr_thread_cond_wait(me->work_done, me->lock);
+ me->waiting_work_done = 0;
+ } while (elt->current_owner && (!owner || elt->current_owner == owner));
+
+ /* Restart */
elt = APR_RING_FIRST(me->busy_thds);
}
apr_thread_mutex_unlock(me->lock);
@@ -779,7 +831,7 @@ APR_DECLARE(apr_interval_time_t)
/*
* This function stop extra idle threads to the cnt.
- * @return the number of threads stopped
+ * @return the list of threads stopped
* NOTE: There could be busy threads become idle during this function
*/
static struct apr_thread_list_elt *trim_threads(apr_thread_pool_t *me,
@@ -791,6 +843,9 @@ static struct apr_thread_list_elt *trim_threads(ap
apr_thread_mutex_lock(me->lock);
apr_pool_owner_set(me->pool, 0);
+
+ join_deads(me);
+
if (idle) {
thds = me->idle_thds;
n = me->idle_cnt;
@@ -839,33 +894,32 @@ static apr_size_t trim_idle_threads(apr_thread_poo
struct apr_thread_list_elt *elt, *head, *tail;
apr_status_t rv;
- elt = trim_threads(me, &cnt, 1);
+ head = trim_threads(me, &cnt, 1);
+ if (!head) {
+ return 0;
+ }
apr_thread_mutex_lock(me->lock);
- apr_thread_cond_broadcast(me->cond);
+ apr_thread_cond_broadcast(me->more_work);
apr_thread_mutex_unlock(me->lock);
n_dbg = 0;
- if (NULL != (head = elt)) {
- while (elt) {
- tail = elt;
- apr_thread_join(&rv, elt->thd);
- elt = APR_RING_NEXT(elt, link);
- ++n_dbg;
- }
- apr_thread_mutex_lock(me->lock);
- APR_RING_SPLICE_TAIL(me->recycled_thds, head, tail,
- apr_thread_list_elt, link);
- apr_thread_mutex_unlock(me->lock);
- }
+ elt = head;
+ do {
+ ++n_dbg;
+ tail = elt;
+ apr_thread_join(&rv, elt->thd);
+ elt = APR_RING_NEXT(elt, link);
+ } while (elt);
assert(cnt == n_dbg);
+ apr_thread_mutex_lock(me->lock);
+ APR_RING_SPLICE_TAIL(me->recycled_thds, head, tail,
+ apr_thread_list_elt, link);
+ apr_thread_mutex_unlock(me->lock);
return cnt;
}
-/* don't join on busy threads for performance reasons, who knows how long will
- * the task takes to perform
- */
static apr_size_t trim_busy_threads(apr_thread_pool_t *me, apr_size_t cnt)
{
trim_threads(me, &cnt, 0);
@@ -904,20 +958,22 @@ APR_DECLARE(apr_size_t) apr_thread_pool_thread_max
APR_DECLARE(apr_size_t) apr_thread_pool_thread_max_set(apr_thread_pool_t *me,
apr_size_t cnt)
{
- apr_size_t n;
+ apr_size_t n, i;
me->thd_max = cnt;
- if (0 == cnt || me->thd_cnt <= cnt) {
+ n = me->thd_cnt;
+ if (n <= cnt) {
return 0;
}
- n = me->thd_cnt - cnt;
- if (n >= me->idle_cnt) {
- trim_busy_threads(me, n - me->idle_cnt);
+ n -= cnt;
+ i = me->idle_cnt;
+ if (n >= i) {
+ trim_busy_threads(me, n - i);
trim_idle_threads(me, 0);
}
else {
- trim_idle_threads(me, me->idle_cnt - n);
+ trim_idle_threads(me, i - n);
}
return n;
}