The branch, master has been updated
via c2bb1334175 lib/pthreadpool: protect jobs list from concurrent thread access
via 07725914a5a lib/pthreadpool: Fix possible concurrent access to pool->glue_list
via 209e6029656 lib/pthreadpool: fix free of already freed glue object
via 9db95fb92df Add unit cmocka tests for pthreadpool_tevent
via 5d9cb79d132 Add basic cmocka unit tests for pthreadpool
from 611b4d6de72 docs-xml/manpages: doc for 'vfs_aio_ratelimit' module
https://git.samba.org/?p=samba.git;a=shortlog;h=master
- Log -----------------------------------------------------------------
commit c2bb1334175dff7b25e5bc8da1e8ba7e046ca386
Author: Noel Power <[email protected]>
Date: Thu Nov 20 09:00:35 2025 +0000
lib/pthreadpool: protect jobs list from concurrent thread access
ThreadSanitizer identifies a data race on pool->jobs when concurrent
threads run the test added in the previous commit.
This commit protects the pool->jobs list with a mutex.
(The trace and line numbers below are from before the glue fix in the previous commit.)
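The locking pattern is the usual one: take the pool's jobs mutex around every
manipulation of the shared list. A minimal, self-contained sketch of that
pattern (simplified names, plain prev/next pointers instead of the DLIST_*
macros and the tevent error reporting the real patch uses; see the changeset
at the end of this mail for the actual change):

/* Sketch only: every access to the shared jobs list holds jobs_mutex. */
#include <pthread.h>
#include <stddef.h>

struct job_state {
        struct job_state *prev, *next;
};

struct pool {
        struct job_state *jobs;        /* shared between threads */
        pthread_mutex_t jobs_mutex;    /* protects ->jobs */
};

static int pool_add_job(struct pool *p, struct job_state *s)
{
        int ret = pthread_mutex_lock(&p->jobs_mutex);
        if (ret != 0) {
                return ret;
        }
        /* simplified head insert; the real code uses DLIST_ADD_END() */
        s->prev = NULL;
        s->next = p->jobs;
        if (p->jobs != NULL) {
                p->jobs->prev = s;
        }
        p->jobs = s;
        return pthread_mutex_unlock(&p->jobs_mutex);
}

static int pool_remove_job(struct pool *p, struct job_state *s)
{
        int ret = pthread_mutex_lock(&p->jobs_mutex);
        if (ret != 0) {
                return ret;
        }
        /* simplified unlink; the real code uses DLIST_REMOVE() */
        if (s->prev != NULL) {
                s->prev->next = s->next;
        } else {
                p->jobs = s->next;
        }
        if (s->next != NULL) {
                s->next->prev = s->prev;
        }
        return pthread_mutex_unlock(&p->jobs_mutex);
}

The ThreadSanitizer report that prompted the change: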
WARNING: ThreadSanitizer: data race (pid=13574)
Write of size 8 at 0x7b6000020260 by thread T16:
#0 pthreadpool_tevent_job_done
../../lib/pthreadpool/pthreadpool_tevent.c:405
(pthreadpool_tevent_unit_test_san+0x407080)
#1 tevent_common_invoke_immediate_handler
../../lib/tevent/tevent_immediate.c:190 (libtevent-private-samba.so+0x8dbf)
#2 pthreadpool_tevent_job_fn
../../lib/pthreadpool/pthreadpool_tevent.c:351
(pthreadpool_tevent_unit_test_san+0x406bc4)
#3 pthreadpool_server ../../lib/pthreadpool/pthreadpool.c:655
(pthreadpool_tevent_unit_test_san+0x4043bd)
#4 <null> <null> (libtsan.so.0+0x323cf)
Previous write of size 8 at 0x7b6000020260 by thread T13:
#0 pthreadpool_tevent_job_send
../../lib/pthreadpool/pthreadpool_tevent.c:342
(pthreadpool_tevent_unit_test_san+0x406a09)
#1 do_nested_pthread_job
../../lib/pthreadpool/test_pthreadpool_tevent.c:463
(pthreadpool_tevent_unit_test_san+0x408932)
#2 pthreadpool_tevent_job_fn
../../lib/pthreadpool/pthreadpool_tevent.c:351
(pthreadpool_tevent_unit_test_san+0x406bc4)
#3 pthreadpool_server ../../lib/pthreadpool/pthreadpool.c:655
(pthreadpool_tevent_unit_test_san+0x4043bd)
#4 <null> <null> (libtsan.so.0+0x323cf)
Thread T16 (tid=13591, running) created by main thread at:
#0 pthread_create <null> (libtsan.so.0+0x5ed75)
#1 pthreadpool_create_thread ../../lib/pthreadpool/pthreadpool.c:711
(pthreadpool_tevent_unit_test_san+0x4045ac)
#2 pthreadpool_add_job ../../lib/pthreadpool/pthreadpool.c:792
(pthreadpool_tevent_unit_test_san+0x40496f)
#3 pthreadpool_tevent_job_send
../../lib/pthreadpool/pthreadpool_tevent.c:329
(pthreadpool_tevent_unit_test_san+0x4065e2)
#4 test_pthreadpool_tevent_job_send_multiple_3
../../lib/pthreadpool/test_pthreadpool_tevent.c:515
(pthreadpool_tevent_unit_test_san+0x408c25)
#5 cmocka_run_one_test_or_fixture
../../third_party/cmocka/cmocka.c:2948 (libcmocka-private-samba.so+0x6f92)
#6 __libc_start_call_main ../sysdeps/nptl/libc_start_call_main.h:58
(libc.so.6+0x40e6b)
Thread T13 (tid=13588, running) created by main thread at:
#0 pthread_create <null> (libtsan.so.0+0x5ed75)
#1 pthreadpool_create_thread ../../lib/pthreadpool/pthreadpool.c:711
(pthreadpool_tevent_unit_test_san+0x4045ac)
#2 pthreadpool_add_job ../../lib/pthreadpool/pthreadpool.c:792
(pthreadpool_tevent_unit_test_san+0x40496f)
#3 pthreadpool_tevent_job_send
../../lib/pthreadpool/pthreadpool_tevent.c:329
(pthreadpool_tevent_unit_test_san+0x4065e2)
#4 test_pthreadpool_tevent_job_send_multiple_3
../../lib/pthreadpool/test_pthreadpool_tevent.c:515
(pthreadpool_tevent_unit_test_san+0x408c25)
#5 cmocka_run_one_test_or_fixture
../../third_party/cmocka/cmocka.c:2948 (libcmocka-private-samba.so+0x6f92)
#6 __libc_start_call_main ../sysdeps/nptl/libc_start_call_main.h:58
(libc.so.6+0x40e6b)
SUMMARY: ThreadSanitizer: data race
../../lib/pthreadpool/pthreadpool_tevent.c:405 in pthreadpool_tevent_job_done
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15958
Signed-off-by: Noel Power <[email protected]>
Reviewed-by: Stefan Metzmacher <[email protected]>
Autobuild-User(master): Stefan Metzmacher <[email protected]>
Autobuild-Date(master): Sun Jan 18 15:16:59 UTC 2026 on atb-devel-224
commit 07725914a5af02af3b4dc267b76b531bf49254e8
Author: Noel Power <[email protected]>
Date: Wed Nov 12 12:24:59 2025 +0000
lib/pthreadpool: Fix possible concurrent access to pool->glue_list
A ThreadSanitizer run against the tests added in the previous commit
identifies a race condition on pool->glue_list under concurrent
thread access.
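The fix introduces a glue_mutex and holds it whenever glue_list is walked or
modified, on both the worker-thread side (pthreadpool_tevent_job_signal) and
the destructor side. A rough sketch of the reader half, written as a
hypothetical helper (find_tctx is not a real function; the struct members it
touches are the ones from the changeset at the end of this mail):

/* Sketch only: snapshot the pointer we need while holding glue_mutex. */
static struct tevent_threaded_context *find_tctx(
        struct pthreadpool_tevent *pool,
        struct tevent_context *ev)
{
        struct pthreadpool_tevent_glue *g = NULL;
        struct tevent_threaded_context *tctx = NULL;

        if (pthread_mutex_lock(&pool->glue_mutex) != 0) {
                return NULL;
        }
        for (g = pool->glue_list; g != NULL; g = g->next) {
                if (g->ev == ev) {
                        tctx = g->tctx;
                        break;
                }
        }
        pthread_mutex_unlock(&pool->glue_mutex);
        return tctx;
}

The ThreadSanitizer report: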
WARNING: ThreadSanitizer: data race (pid=13574)
Read of size 8 at 0x7b2000000368 by thread T7:
#0 pthreadpool_tevent_job_signal
../../lib/pthreadpool/pthreadpool_tevent.c:370
(pthreadpool_tevent_unit_test_san+0x406c6e)
#1 pthreadpool_server ../../lib/pthreadpool/pthreadpool.c:657
(pthreadpool_tevent_unit_test_san+0x40443b)
#2 <null> <null> (libtsan.so.0+0x323cf)
Previous write of size 8 at 0x7b2000000368 by main thread:
#0 pthreadpool_tevent_glue_destructor
../../lib/pthreadpool/pthreadpool_tevent.c:165
(pthreadpool_tevent_unit_test_san+0x405aed)
#1 _tc_free_internal ../../lib/talloc/talloc.c:1158
(libtalloc-private-samba.so+0x3419)
#2 _tc_free_internal ../../lib/talloc/talloc.c:1158
(libtalloc-private-samba.so+0x3419)
#3 cmocka_run_one_test_or_fixture
../../third_party/cmocka/cmocka.c:2948 (libcmocka-private-samba.so+0x6f92)
#4 __libc_start_call_main ../sysdeps/nptl/libc_start_call_main.h:58
(libc.so.6+0x40e6b)
Location is heap block of size 120 at 0x7b2000000300 allocated by main
thread:
#0 malloc <null> (libtsan.so.0+0x35799)
#1 __talloc_with_prefix ../../lib/talloc/talloc.c:783
(libtalloc-private-samba.so+0x2a99)
#2 test_pthreadpool_tevent_job_send_multiple_2
../../lib/pthreadpool/test_pthreadpool_tevent.c:399
(pthreadpool_tevent_unit_test_san+0x40856f)
#3 cmocka_run_one_test_or_fixture
../../third_party/cmocka/cmocka.c:2948 (libcmocka-private-samba.so+0x6f92)
#4 __libc_start_call_main ../sysdeps/nptl/libc_start_call_main.h:58
(libc.so.6+0x40e6b)
Thread T7 (tid=13582, running) created by main thread at:
#0 pthread_create <null> (libtsan.so.0+0x5ed75)
#1 pthreadpool_create_thread ../../lib/pthreadpool/pthreadpool.c:711
(pthreadpool_tevent_unit_test_san+0x4045ac)
#2 pthreadpool_add_job ../../lib/pthreadpool/pthreadpool.c:792
(pthreadpool_tevent_unit_test_san+0x40496f)
#3 pthreadpool_tevent_job_send
../../lib/pthreadpool/pthreadpool_tevent.c:329
(pthreadpool_tevent_unit_test_san+0x4065e2)
#4 test_pthreadpool_tevent_job_send_multiple_2
../../lib/pthreadpool/test_pthreadpool_tevent.c:423
(pthreadpool_tevent_unit_test_san+0x4086b2)
#5 cmocka_run_one_test_or_fixture
../../third_party/cmocka/cmocka.c:2948 (libcmocka-private-samba.so+0x6f92)
#6 __libc_start_call_main ../sysdeps/nptl/libc_start_call_main.h:58
(libc.so.6+0x40e6b)
SUMMARY: ThreadSanitizer: data race
../../lib/pthreadpool/pthreadpool_tevent.c:370 in pthreadpool_tevent_job_signal
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15958
Signed-off-by: Noel Power <[email protected]>
Reviewed-by: Stefan Metzmacher <[email protected]>
commit 209e60296563e3da53f6cc7ed8a758cf34d7a285
Author: Noel Power <[email protected]>
Date: Mon Nov 17 08:51:49 2025 +0000
lib/pthreadpool: fix free of already freed glue object
The line numbers are slightly skewed from the actual sources (due to
temporary debug lines), but I have inserted the relevant source code below.
Basically, if we free ev_link then its destructor also frees the glue object,
so freeing the glue object again on the next line is not what we want to do.
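As a standalone illustration of the mechanism (simplified stand-in types, not
the Samba sources; ev_link_destructor plays the role of
pthreadpool_tevent_glue_link_destructor; build against libtalloc):

/* A destructor on one talloc object frees the other, so freeing both
 * explicitly is a double free. */
#include <talloc.h>

struct glue {                 /* stands in for pthreadpool_tevent_glue */
        int dummy;
};

struct ev_link {              /* stands in for ..._glue_ev_link */
        struct glue *glue;
};

static int ev_link_destructor(struct ev_link *l)
{
        TALLOC_FREE(l->glue); /* same idea as glue_link_destructor() */
        return 0;
}

int main(void)
{
        TALLOC_CTX *ctx = talloc_new(NULL);
        struct glue *glue = talloc_zero(ctx, struct glue);
        struct ev_link *ev_link = talloc_zero(ctx, struct ev_link);

        ev_link->glue = glue;
        talloc_set_destructor(ev_link, ev_link_destructor);

        TALLOC_FREE(ev_link); /* frees glue as a side effect */
        /* TALLOC_FREE(glue);    a second free here is the reported bug;
         *                       the fix is to drop this call            */
        talloc_free(ctx);
        return 0;
}

The valgrind report, with the relevant source interleaved: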
==14263== Invalid read of size 4
==14263== at 0x4D13E90: talloc_chunk_from_ptr (talloc.c:527)
==14263== by 0x4D1621E: _talloc_free (talloc.c:1770)
249 #ifdef HAVE_PTHREAD
250 glue->tctx = tevent_threaded_context_create(glue, ev);
251 if (glue->tctx == NULL) {
252 TALLOC_FREE(ev_link);
* 253 TALLOC_FREE(glue);
254 return ENOMEM;
250 }
==14263== by 0x51FA0AF: pthreadpool_tevent_register_ev
(pthreadpool_tevent.c:253)
==14263== by 0x51FA302: pthreadpool_tevent_job_send
(pthreadpool_tevent.c:324)
==14263== by 0x4B01E68: vfswrap_fsync_send (vfs_default.c:1104)
==14263== by 0x49CD9B1: smb_vfs_call_fsync_send (vfs.c:1998)
==14263== by 0x49CDBF9: smb_vfs_fsync_sync (vfs.c:2057)
==14263== by 0x494B1E5: sync_file (fileio.c:320)
==14263== by 0x497CC77: reply_flush (reply.c:5398)
==14263== by 0x49E28CB: switch_message (process.c:1726)
==14263== by 0x49E2AA4: construct_reply (process.c:1762)
==14263== by 0x49E37F8: process_smb (process.c:2017)
==14263== Address 0xcb415d0 is 0 bytes inside a block of size 144 free'd
==14263== at 0x484494B: free (in
/usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
==14263== by 0x4D14F81: _tc_free_internal (talloc.c:1222)
==14263== by 0x4D15025: _talloc_free_internal (talloc.c:1248)
==14263== by 0x4D162ED: _talloc_free (talloc.c:1792)
190 static int pthreadpool_tevent_glue_link_destructor(
191 struct pthreadpool_tevent_glue_ev_link *ev_link)
192 {
* 193 TALLOC_FREE(ev_link->glue);
194 return 0;
195 }
==14263== by 0x51F9EC3: pthreadpool_tevent_glue_link_destructor
(pthreadpool_tevent.c:193)
==14263== by 0x4D14CA9: _tc_free_internal (talloc.c:1158)
==14263== by 0x4D15025: _talloc_free_internal (talloc.c:1248)
==14263== by 0x4D162ED: _talloc_free (talloc.c:1792)
249 #ifdef HAVE_PTHREAD
250 glue->tctx = tevent_threaded_context_create(glue, ev);
251 if (glue->tctx == NULL) {
* 252 TALLOC_FREE(ev_link);
253 TALLOC_FREE(glue);
254 return ENOMEM;
250 }
==14263== by 0x51FA08D: pthreadpool_tevent_register_ev
(pthreadpool_tevent.c:252)
==14263== by 0x51FA302: pthreadpool_tevent_job_send
(pthreadpool_tevent.c:324)
==14263== by 0x4B01E68: vfswrap_fsync_send (vfs_default.c:1104)
==14263== by 0x49CD9B1: smb_vfs_call_fsync_send (vfs.c:1998)
==14263== Block was alloc'd at
==14263== at 0x4841984: malloc (in
/usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
==14263== by 0x4D14339: __talloc_with_prefix (talloc.c:783)
==14263== by 0x4D144D3: __talloc (talloc.c:825)
==14263== by 0x4D1486C: _talloc_named_const (talloc.c:982)
==14263== by 0x4D1734D: _talloc_zero (talloc.c:2421)
==14263== by 0x51F9F46: pthreadpool_tevent_register_ev
(pthreadpool_tevent.c:222)
==14263== by 0x51FA302: pthreadpool_tevent_job_send
(pthreadpool_tevent.c:324)
==14263== by 0x4B01E68: vfswrap_fsync_send (vfs_default.c:1104)
==14263== by 0x49CD9B1: smb_vfs_call_fsync_send (vfs.c:1998)
==14263== by 0x49CDBF9: smb_vfs_fsync_sync (vfs.c:2057)
==14263== by 0x494B1E5: sync_file (fileio.c:320)
==14263== by 0x497CC77: reply_flush (reply.c:5398)
==14263==
==14263== Invalid read of size 4
==14263== at 0x4D13EAE: talloc_chunk_from_ptr (talloc.c:528)
==14263== by 0x4D1621E: _talloc_free (talloc.c:1770)
==14263== by 0x51FA0AF: pthreadpool_tevent_register_ev
(pthreadpool_tevent.c:253)
==14263== by 0x51FA302: pthreadpool_tevent_job_send
(pthreadpool_tevent.c:324)
==14263== by 0x4B01E68: vfswrap_fsync_send (vfs_default.c:1104)
==14263== by 0x49CD9B1: smb_vfs_call_fsync_send (vfs.c:1998)
==14263== by 0x49CDBF9: smb_vfs_fsync_sync (vfs.c:2057)
==14263== by 0x494B1E5: sync_file (fileio.c:320)
==14263== by 0x497CC77: reply_flush (reply.c:5398)
==14263== by 0x49E28CB: switch_message (process.c:1726)
==14263== by 0x49E2AA4: construct_reply (process.c:1762)
==14263== by 0x49E37F8: process_smb (process.c:2017)
==14263== Address 0xcb415d0 is 0 bytes inside a block of size 144 free'd
==14263== at 0x484494B: free (in
/usr/lib/valgrind/vgpreload_memcheck-amd64-l
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15957
Signed-off-by: Noel Power <[email protected]>
Reviewed-by: Stefan Metzmacher <[email protected]>
commit 9db95fb92dfa5ad6b702b95c63f6c6a29948dc40
Author: Noel Power <[email protected]>
Date: Wed Nov 26 16:38:25 2025 +0000
Add unit cmocka tests for pthreadpool_tevent
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15958
Signed-off-by: Noel Power <[email protected]>
Reviewed-by: Stefan Metzmacher <[email protected]>
commit 5d9cb79d13274dcda14578447e59a9da18228872
Author: Noel Power <[email protected]>
Date: Wed Nov 26 09:46:23 2025 +0000
Add basic cmocka unit tests for pthreadpool
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15958
Pair-Programmed-With: Andreas Schneider <[email protected]>
Signed-off-by: Noel Power <[email protected]>
Signed-off-by: Andreas Schneider <[email protected]>
Reviewed-by: Stefan Metzmacher <[email protected]>
-----------------------------------------------------------------------
Summary of changes:
lib/pthreadpool/pthreadpool_tevent.c | 121 ++++-
lib/pthreadpool/test_pthreadpool.c | 636 ++++++++++++++++++++++++
lib/pthreadpool/test_pthreadpool_tevent.c | 777 ++++++++++++++++++++++++++++++
lib/pthreadpool/wscript_build | 12 +
source3/selftest/tests.py | 4 +
5 files changed, 1548 insertions(+), 2 deletions(-)
create mode 100644 lib/pthreadpool/test_pthreadpool.c
create mode 100644 lib/pthreadpool/test_pthreadpool_tevent.c
Changeset truncated at 500 lines:
diff --git a/lib/pthreadpool/pthreadpool_tevent.c b/lib/pthreadpool/pthreadpool_tevent.c
index a66f444d193..422a88c784a 100644
--- a/lib/pthreadpool/pthreadpool_tevent.c
+++ b/lib/pthreadpool/pthreadpool_tevent.c
@@ -19,6 +19,7 @@
#include "replace.h"
#include "system/filesys.h"
+#include "system/threads.h"
#include "pthreadpool_tevent.h"
#include "pthreadpool.h"
#include "lib/util/tevent_unix.h"
@@ -57,8 +58,17 @@ struct pthreadpool_tevent_glue_ev_link {
struct pthreadpool_tevent {
struct pthreadpool *pool;
struct pthreadpool_tevent_glue *glue_list;
+ /*
+ * Control access to the glue_list
+ */
+ pthread_mutex_t glue_mutex;
struct pthreadpool_tevent_job_state *jobs;
+ /*
+ * Control access to the jobs
+ */
+ pthread_mutex_t jobs_mutex;
+
};
struct pthreadpool_tevent_job_state {
@@ -97,6 +107,19 @@ int pthreadpool_tevent_init(TALLOC_CTX *mem_ctx, unsigned max_threads,
return ret;
}
+ ret = pthread_mutex_init(&pool->glue_mutex, NULL);
+ if (ret != 0) {
+ TALLOC_FREE(pool);
+ return ret;
+ }
+
+ ret = pthread_mutex_init(&pool->jobs_mutex, NULL);
+ if (ret != 0) {
+ pthread_mutex_destroy(&pool->glue_mutex);
+ TALLOC_FREE(pool);
+ return ret;
+ }
+
talloc_set_destructor(pool, pthreadpool_tevent_destructor);
*presult = pool;
@@ -132,12 +155,26 @@ static int pthreadpool_tevent_destructor(struct pthreadpool_tevent *pool)
return ret;
}
+ ret = pthread_mutex_lock(&pool->jobs_mutex);
+ if (ret != 0 ) {
+ return ret;
+ }
for (state = pool->jobs; state != NULL; state = next) {
next = state->next;
DLIST_REMOVE(pool->jobs, state);
state->pool = NULL;
}
+ ret = pthread_mutex_unlock(&pool->jobs_mutex);
+ if (ret != 0 ) {
+ return ret;
+ }
+
+ ret = pthread_mutex_lock(&pool->glue_mutex);
+ if (ret != 0) {
+ return ret;
+ }
+
/*
* Delete all the registered
* tevent_context/tevent_threaded_context
@@ -147,6 +184,11 @@ static int pthreadpool_tevent_destructor(struct pthreadpool_tevent *pool)
/* The glue destructor removes it from the list */
TALLOC_FREE(glue);
}
+
+ pthread_mutex_unlock(&pool->glue_mutex);
+ pthread_mutex_destroy(&pool->jobs_mutex);
+ pthread_mutex_destroy(&pool->glue_mutex);
+
pool->glue_list = NULL;
ret = pthreadpool_destroy(pool->pool);
@@ -158,6 +200,16 @@ static int pthreadpool_tevent_destructor(struct pthreadpool_tevent *pool)
return 0;
}
+/*
+ * glue destruction is only called with
+ * glue_mutex already locked either from
+ * a) pthreadpool_tevent_destructor or
+ * b) pthreadpool_tevent_glue_link_destructor
+ * pthreadpool_tevent_destructor accesses
+ * the glue_list while calling pthreadpool_tevent_glue_destructor
+ * which modifies the glue_list so it needs the lock held while
+ * doing that.
+ */
static int pthreadpool_tevent_glue_destructor(
struct pthreadpool_tevent_glue *glue)
{
@@ -190,7 +242,21 @@ static int pthreadpool_tevent_glue_destructor(
static int pthreadpool_tevent_glue_link_destructor(
struct pthreadpool_tevent_glue_ev_link *ev_link)
{
- TALLOC_FREE(ev_link->glue);
+ if (ev_link->glue) {
+ int ret;
+ /* save the mutex */
+ pthread_mutex_t *glue_mutex =
+ &ev_link->glue->pool->glue_mutex;
+ ret = pthread_mutex_lock(glue_mutex);
+ if (ret != 0) {
+ return ret;
+ }
+ TALLOC_FREE(ev_link->glue);
+ ret = pthread_mutex_unlock(glue_mutex);
+ if (ret != 0) {
+ return ret;
+ }
+ }
return 0;
}
@@ -199,7 +265,12 @@ static int pthreadpool_tevent_register_ev(struct pthreadpool_tevent *pool,
{
struct pthreadpool_tevent_glue *glue = NULL;
struct pthreadpool_tevent_glue_ev_link *ev_link = NULL;
+ int ret;
+ ret = pthread_mutex_lock(&pool->glue_mutex);
+ if (ret != 0) {
+ return ret;
+ }
/*
* See if this tevent_context was already registered by
* searching the glue object list. If so we have nothing
@@ -208,10 +279,18 @@ static int pthreadpool_tevent_register_ev(struct pthreadpool_tevent *pool,
*/
for (glue = pool->glue_list; glue != NULL; glue = glue->next) {
if (glue->ev == ev) {
+ ret = pthread_mutex_unlock(&pool->glue_mutex);
+ if (ret != 0) {
+ return ret;
+ }
return 0;
}
}
+ ret = pthread_mutex_unlock(&pool->glue_mutex);
+ if (ret != 0) {
+ return ret;
+ }
/*
* Event context not yet registered - create a new glue
* object containing a tevent_context/tevent_threaded_context
@@ -250,12 +329,21 @@ static int pthreadpool_tevent_register_ev(struct pthreadpool_tevent *pool,
glue->tctx = tevent_threaded_context_create(glue, ev);
if (glue->tctx == NULL) {
TALLOC_FREE(ev_link);
- TALLOC_FREE(glue);
return ENOMEM;
}
#endif
+ ret = pthread_mutex_lock(&pool->glue_mutex);
+ if (ret != 0) {
+ return ret;
+ }
+
DLIST_ADD(pool->glue_list, glue);
+
+ ret = pthread_mutex_unlock(&pool->glue_mutex);
+ if (ret != 0) {
+ return ret;
+ }
return 0;
}
@@ -339,8 +427,17 @@ struct tevent_req *pthreadpool_tevent_job_send(
*/
talloc_set_destructor(state, pthreadpool_tevent_job_state_destructor);
+ ret = pthread_mutex_lock(&pool->jobs_mutex);
+ if (tevent_req_error(req, ret)) {
+ return tevent_req_post(req, ev);
+ }
DLIST_ADD_END(pool->jobs, state);
+ ret = pthread_mutex_unlock(&pool->jobs_mutex);
+ if (tevent_req_error(req, ret)) {
+ return tevent_req_post(req, ev);
+ }
+
return req;
}
@@ -360,6 +457,7 @@ static int pthreadpool_tevent_job_signal(int jobid,
job_private_data, struct pthreadpool_tevent_job_state);
struct tevent_threaded_context *tctx = NULL;
struct pthreadpool_tevent_glue *g = NULL;
+ int ret;
if (state->pool == NULL) {
/* The pthreadpool_tevent is already gone */
@@ -367,6 +465,11 @@ static int pthreadpool_tevent_job_signal(int jobid,
}
#ifdef HAVE_PTHREAD
+ ret = pthread_mutex_lock(&state->pool->glue_mutex);
+ if (ret != 0) {
+ return ret;
+ }
+
for (g = state->pool->glue_list; g != NULL; g = g->next) {
if (g->ev == state->ev) {
tctx = g->tctx;
@@ -374,6 +477,11 @@ static int pthreadpool_tevent_job_signal(int jobid,
}
}
+ ret = pthread_mutex_unlock(&state->pool->glue_mutex);
+ if (ret != 0) {
+ return ret;
+ }
+
if (tctx == NULL) {
abort();
}
@@ -402,8 +510,17 @@ static void pthreadpool_tevent_job_done(struct tevent_context *ctx,
private_data, struct pthreadpool_tevent_job_state);
if (state->pool != NULL) {
+ int ret;
+ ret = pthread_mutex_lock(&state->pool->jobs_mutex);
+ if (tevent_req_error(state->req, ret)) {
+ return;
+ }
DLIST_REMOVE(state->pool->jobs, state);
+ ret = pthread_mutex_unlock(&state->pool->jobs_mutex);
state->pool = NULL;
+ if (tevent_req_error(state->req, ret)) {
+ return;
+ }
}
if (state->req == NULL) {
diff --git a/lib/pthreadpool/test_pthreadpool.c b/lib/pthreadpool/test_pthreadpool.c
new file mode 100644
index 00000000000..0517c0d24c4
--- /dev/null
+++ b/lib/pthreadpool/test_pthreadpool.c
@@ -0,0 +1,636 @@
+/*
+ * Unix SMB/CIFS implementation.
+ * cmocka tests for pthreadpool implementation
+ * Copyright (C) 2025
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <setjmp.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <cmocka.h>
+
+#include <talloc.h>
+
+#include "pthreadpool.h"
+
+/* Test state structure */
+struct test_state {
+ struct pthreadpool *pool;
+ int signal_received;
+ int signal_job_id;
+ void (*signal_job_fn)(void *);
+ void *signal_job_fn_data;
+ /* protect test_state */
+ pthread_mutex_t mutex;
+};
+
+struct mutex_int {
+ int num;
+ /* protect num */
+ pthread_mutex_t mutex;
+};
+
+/* Signal function for testing */
+static int test_signal_fn(int jobid,
+ void (*job_fn)(void *private_data),
+ void *job_fn_private_data,
+ void *private_data)
+{
+ int ret;
+ struct test_state *state = talloc_get_type_abort(private_data,
+ struct test_state);
+
+ ret = pthread_mutex_lock(&state->mutex);
+ assert_int_equal(ret, 0);
+ state->signal_received++;
+ state->signal_job_id = jobid;
+ state->signal_job_fn = job_fn;
+ state->signal_job_fn_data = job_fn_private_data;
+ ret = pthread_mutex_unlock(&state->mutex);
+ assert_int_equal(ret, 0);
+
+ return 0;
+}
+
+static void safe_increment(struct mutex_int *counter)
+{
+ int ret;
+
+ ret = pthread_mutex_lock(&counter->mutex);
+ assert_int_equal(ret, 0);
+ counter->num++;
+ ret = pthread_mutex_unlock(&counter->mutex);
+ assert_int_equal(ret, 0);
+}
+
+/* Simple job function that increments a counter (in a thread safe way)*/
+static void increment_job(void *private_data)
+{
+ struct mutex_int *num = (struct mutex_int*)private_data;
+ safe_increment(num);
+}
+
+/* Job function that sleeps briefly */
+static void sleep_job(void *private_data)
+{
+ int *duration = (int *)private_data;
+ usleep(*duration * 1000); /* Convert ms to microseconds */
+}
+
+/* Setup function */
+static int setup(void **state)
+{
+ struct test_state *test_state = NULL;
+ int ret;
+
+ test_state = talloc_zero(NULL, struct test_state);
+ assert_non_null(test_state);
+
+ ret = pthread_mutex_init(&test_state->mutex, NULL);
+ assert_int_equal(ret, 0);
+ *state = test_state;
+ return 0;
+}
+
+/* Teardown function */
+static int teardown(void **state)
+{
+ struct test_state *test_state = talloc_get_type_abort(
+ *state, struct test_state);
+
+ if (test_state->pool != NULL) {
+ pthreadpool_destroy(test_state->pool);
+ test_state->pool = NULL;
+ }
+ pthread_mutex_destroy(&test_state->mutex);
+ TALLOC_FREE(test_state);
+ return 0;
+}
+
+/* Test: Initialize pool with different max_threads values */
+static void test_pthreadpool_init(void **state)
+{
+ struct test_state *test_state = talloc_get_type_abort(
+ *state, struct test_state);
+ int ret;
+
+ /* Test with unlimited threads (0) */
+ ret = pthreadpool_init(0,
+ &test_state->pool,
+ test_signal_fn,
+ test_state);
+ assert_int_equal(ret, 0);
+ assert_non_null(test_state->pool);
+ assert_int_equal(pthreadpool_max_threads(test_state->pool), 0);
+
+ pthreadpool_destroy(test_state->pool);
+ test_state->pool = NULL;
+
+ /* Test with limited threads */
+ ret = pthreadpool_init(4,
+ &test_state->pool,
+ test_signal_fn,
+ test_state);
+ assert_int_equal(ret, 0);
+ assert_non_null(test_state->pool);
+ assert_int_equal(pthreadpool_max_threads(test_state->pool), 4);
+
+ pthreadpool_destroy(test_state->pool);
+ test_state->pool = NULL;
+
+ /* Test with 1 thread */
+ ret = pthreadpool_init(1,
+ &test_state->pool,
+ test_signal_fn,
+ test_state);
+ assert_int_equal(ret, 0);
+ assert_non_null(test_state->pool);
+ assert_int_equal(pthreadpool_max_threads(test_state->pool), 1);
+}
+
+/* Test: Add and execute a simple job */
+static void test_pthreadpool_add_job_simple(void **state)
+{
+ struct test_state *test_state = talloc_get_type_abort(
+ *state, struct test_state);
+ int ret;
+ struct mutex_int counter = {0};
+ int timeout;
+ int signal_received = 0;
+
+ ret = pthreadpool_init(2,
+ &test_state->pool,
+ test_signal_fn,
+ test_state);
+ assert_int_equal(ret, 0);
+
+ ret = pthread_mutex_init(&counter.mutex, NULL);
+ assert_int_equal(ret, 0);
+
+ /* Add a job */
+ ret = pthreadpool_add_job(test_state->pool, 1, increment_job, &counter);
+ assert_int_equal(ret, 0);
+
+ /* Wait for job completion (with timeout) */
+ timeout = 0;
+ do {
+ ret = pthread_mutex_lock(&test_state->mutex);
+ assert_int_equal(ret, 0);
+ signal_received = test_state->signal_received;
+ ret = pthread_mutex_unlock(&test_state->mutex);
+ assert_int_equal(ret, 0);
+ usleep(10000); /* 10ms */
+ timeout++;
+
+ } while (signal_received == 0 && timeout < 100);
+
+ /* Verify job was executed */
+ assert_int_equal(counter.num, 1);
+ assert_int_equal(test_state->signal_received, 1);
+ assert_int_equal(test_state->signal_job_id, 1);
+ assert_ptr_equal(test_state->signal_job_fn, increment_job);
+ assert_ptr_equal(test_state->signal_job_fn_data, &counter);
+ pthread_mutex_destroy(&counter.mutex);
+}
+
+/* Test: Add multiple jobs */
+static void test_pthreadpool_add_multiple_jobs(void **state)
+{
+ struct test_state *test_state = talloc_get_type_abort(
+ *state, struct test_state);
+ int ret;
+ struct mutex_int counter = {0};
+ int i;
+ int timeout;
+ int signal_received = 0;
+
+ ret = pthreadpool_init(4,
+ &test_state->pool,
+ test_signal_fn,
+ test_state);
+ assert_int_equal(ret, 0);
+
+ ret = pthread_mutex_init(&counter.mutex, NULL);
+ assert_int_equal(ret, 0);
+
+ /* Add multiple jobs */
+ for (i = 0; i < 10; i++) {
+ ret = pthreadpool_add_job(test_state->pool,
+ i,
+ increment_job,
+ &counter);
+ assert_int_equal(ret, 0);
--
Samba Shared Repository