This patch changes ls_clear_proc_locks to a spinlock. There is no need
for it to be a mutex because no code sleeps while the lock is held, and
using a spinlock allows this functionality to be called from
non-sleepable contexts.
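
For context, a minimal sketch of the locking rule this relies on
(illustrative only, not code from this patch; the function and its
arguments are hypothetical):

	#include <linux/spinlock.h>
	#include <linux/list.h>

	/* mutex_lock() may sleep, so it must not be called from atomic
	 * context (e.g. softirq).  spin_lock() is safe there, provided
	 * the critical section itself never sleeps -- which holds for
	 * the short list and flag manipulation done under
	 * ls_clear_proc_locks.
	 */
	static void sketch_del_proc_lock(spinlock_t *lock,
					 struct list_head *locks)
	{
		spin_lock(lock);
		if (!list_empty(locks))
			list_del_init(locks->next); /* non-sleeping work only */
		spin_unlock(lock);
	}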

Signed-off-by: Alexander Aring <aahri...@redhat.com>
---
 fs/dlm/dlm_internal.h | 2 +-
 fs/dlm/lock.c         | 8 ++++----
 fs/dlm/lockspace.c    | 2 +-
 fs/dlm/user.c         | 4 ++--
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 8aca8085d24e..e34c3d2639a5 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -661,7 +661,7 @@ struct dlm_ls {
        spinlock_t              ls_recover_idr_lock;
        wait_queue_head_t       ls_wait_general;
        wait_queue_head_t       ls_recover_lock_wait;
-       struct mutex            ls_clear_proc_locks;
+       spinlock_t              ls_clear_proc_locks;
 
        struct list_head        ls_root_list;   /* root resources */
        struct rw_semaphore     ls_root_sem;    /* protect root_list */
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index c41aa8ab3230..65a7a0631ec8 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -6215,7 +6215,7 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
 {
        struct dlm_lkb *lkb = NULL;
 
-       mutex_lock(&ls->ls_clear_proc_locks);
+       spin_lock(&ls->ls_clear_proc_locks);
        if (list_empty(&proc->locks))
                goto out;
 
@@ -6227,7 +6227,7 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
        else
                lkb->lkb_flags |= DLM_IFL_DEAD;
  out:
-       mutex_unlock(&ls->ls_clear_proc_locks);
+       spin_unlock(&ls->ls_clear_proc_locks);
        return lkb;
 }
 
@@ -6264,7 +6264,7 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
                dlm_put_lkb(lkb);
        }
 
-       mutex_lock(&ls->ls_clear_proc_locks);
+       spin_lock(&ls->ls_clear_proc_locks);
 
        /* in-progress unlocks */
        list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
@@ -6280,7 +6280,7 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
                dlm_put_lkb(lkb);
        }
 
-       mutex_unlock(&ls->ls_clear_proc_locks);
+       spin_unlock(&ls->ls_clear_proc_locks);
        dlm_unlock_recovery(ls);
 }
 
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 56c79926e7be..41a6504cfab5 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -584,7 +584,7 @@ static int new_lockspace(const char *name, const char *cluster,
        atomic_set(&ls->ls_requestqueue_cnt, 0);
        init_waitqueue_head(&ls->ls_requestqueue_wait);
        mutex_init(&ls->ls_requestqueue_mutex);
-       mutex_init(&ls->ls_clear_proc_locks);
+       spin_lock_init(&ls->ls_clear_proc_locks);
 
        /* Due backwards compatibility with 3.1 we need to use maximum
         * possible dlm message size to be sure the message will fit and
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 99e8f0744513..df6215c73239 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -184,7 +184,7 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
                return;
 
        ls = lkb->lkb_resource->res_ls;
-       mutex_lock(&ls->ls_clear_proc_locks);
+       spin_lock(&ls->ls_clear_proc_locks);
 
        /* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
           can't be delivered.  For ORPHAN's, dlm_clear_proc_locks() freed
@@ -230,7 +230,7 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
                spin_unlock(&proc->locks_spin);
        }
  out:
-       mutex_unlock(&ls->ls_clear_proc_locks);
+       spin_unlock(&ls->ls_clear_proc_locks);
 }
 
 static int device_user_lock(struct dlm_user_proc *proc,
-- 
2.31.1
