From: Stanislav Kinsburskiy <skinsbur...@parallels.com> Here is the race:
CPU #0				CPU #1
cleanup_mnt			nfs41_callback_svc (get xprt from the list)
nfs_callback_down		...
...				...
svc_close_net			...
...				...
svc_xprt_free			...
svc_bc_sock_free		bc_svc_process
kfree(xprt)			svc_process_common
				rqstp->rq_xprt->xpt_ops (use after free)

The problem is that per-net SUNRPC transports shutdown is done regardless of
any callback execution currently in progress. This is a race leading to a
transport use-after-free in the callback handler.

This patch fixes it in a straightforward way: it protects callback execution
with the same mutex used for per-net data creation and destruction.
Hopefully, it won't slow down the NFS client significantly.

https://jira.sw.ru/browse/PSBM-75751

v6: destroy all per-net backchannel requests only for NFSv4.1

v5: destroy all per-net backchannel requests before transports in
nfs_callback_down_net

v4: use another mutex to protect callback execution against per-net
transports shutdown.
This guarantees that transports won't be destroyed by the shutdown callback
while execution is in progress, and vice versa.

v3: fix a mutex deadlock, where the shutdown callback waits for the thread to
exit (with the mutex taken), while the thread waits for the mutex to be
released. The idea is to simply check whether the thread has to exit when the
mutex lock attempt fails. This is a busy loop, but it shouldn't happen often
or for long.
Signed-off-by: Stanislav Kinsburskiy <skinsbur...@virtuozzo.com> --- fs/nfs/callback.c | 20 ++++++++++++++++++++ include/linux/sunrpc/svc.h | 1 + 2 files changed, 21 insertions(+) diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 0beb275..feffccf 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -99,6 +99,8 @@ nfs4_callback_up(struct svc_serv *serv) } #if defined(CONFIG_NFS_V4_1) +static DEFINE_MUTEX(nfs41_callback_mutex); + /* * The callback service for NFSv4.1 callbacks */ @@ -117,6 +119,12 @@ nfs41_callback_svc(void *vrqstp) if (try_to_freeze()) continue; + mutex_lock(&nfs41_callback_mutex); + if (kthread_should_stop()) { + mutex_unlock(&nfs41_callback_mutex); + return 0; + } + prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE); spin_lock_bh(&serv->sv_cb_lock); if (!list_empty(&serv->sv_cb_list)) { @@ -129,8 +137,10 @@ nfs41_callback_svc(void *vrqstp) error = bc_svc_process(serv, req, rqstp); dprintk("bc_svc_process() returned w/ error code= %d\n", error); + mutex_unlock(&nfs41_callback_mutex); } else { spin_unlock_bh(&serv->sv_cb_lock); + mutex_unlock(&nfs41_callback_mutex); schedule(); finish_wait(&serv->sv_cb_waitq, &wq); } @@ -139,6 +149,13 @@ nfs41_callback_svc(void *vrqstp) return 0; } +static void nfs41_callback_down_net(struct svc_serv *serv, struct net *net) +{ + mutex_lock(&nfs41_callback_mutex); + bc_svc_flush_queue_net(serv, net); + mutex_unlock(&nfs41_callback_mutex); +} + /* * Bring up the NFSv4.1 callback service */ @@ -150,6 +167,7 @@ nfs41_callback_up(struct svc_serv *serv) INIT_LIST_HEAD(&serv->sv_cb_list); spin_lock_init(&serv->sv_cb_lock); init_waitqueue_head(&serv->sv_cb_waitq); + serv->svc_cb_down_net = nfs41_callback_down_net; rqstp = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE); dprintk("--> %s return %d\n", __func__, PTR_ERR_OR_ZERO(rqstp)); return rqstp; @@ -242,6 +260,8 @@ static void nfs_callback_down_net(u32 minorversion, struct svc_serv *serv, struc return; dprintk("NFS: destroy per-net 
callback data; net=%p\n", net); + if (serv->svc_cb_down_net) + serv->svc_cb_down_net(serv, net); svc_shutdown_net(serv, net); } diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index fe70ff0..c04ef80 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -108,6 +108,7 @@ struct svc_serv { wait_queue_head_t sv_cb_waitq; /* sleep here if there are no * entries in the svc_cb_list */ struct svc_xprt *sv_bc_xprt; /* callback on fore channel */ + void (*svc_cb_down_net)(struct svc_serv *serv, struct net *net); #endif /* CONFIG_SUNRPC_BACKCHANNEL */ }; _______________________________________________ Devel mailing list Devel@openvz.org https://lists.openvz.org/mailman/listinfo/devel