There is no need to use a mutex in these hot-path sections. Change it
to a spinlock so callbacks are served faster, since a spinlock does not
allow the holder to be scheduled out. The locked sections are only held
for a short time.

Signed-off-by: Alexander Aring <aahri...@redhat.com>
---
 fs/dlm/ast.c          | 8 ++++----
 fs/dlm/dlm_internal.h | 2 +-
 fs/dlm/lock.c         | 2 +-
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index daaa0dff6ef4..3e76ec75bc55 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -190,7 +190,7 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
                return;
        }
 
-       mutex_lock(&lkb->lkb_cb_mutex);
+       spin_lock(&lkb->lkb_cb_lock);
        prev_seq = lkb->lkb_callbacks[0].seq;
 
        rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, new_seq);
@@ -209,7 +209,7 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
                spin_unlock(&ls->ls_cb_lock);
        }
  out:
-       mutex_unlock(&lkb->lkb_cb_mutex);
+       spin_unlock(&lkb->lkb_cb_lock);
 }
 
 void dlm_callback_work(struct work_struct *work)
@@ -223,7 +223,7 @@ void dlm_callback_work(struct work_struct *work)
 
        memset(&callbacks, 0, sizeof(callbacks));
 
-       mutex_lock(&lkb->lkb_cb_mutex);
+       spin_lock(&lkb->lkb_cb_lock);
        if (!lkb->lkb_callbacks[0].seq) {
                /* no callback work exists, shouldn't happen */
                log_error(ls, "dlm_callback_work %x no work", lkb->lkb_id);
@@ -244,7 +244,7 @@ void dlm_callback_work(struct work_struct *work)
                dlm_print_lkb(lkb);
                dlm_dump_lkb_callbacks(lkb);
        }
-       mutex_unlock(&lkb->lkb_cb_mutex);
+       spin_unlock(&lkb->lkb_cb_lock);
 
        castfn = lkb->lkb_astfn;
        bastfn = lkb->lkb_bastfn;
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 647a6a61531c..11f3a5c67bdd 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -268,7 +268,7 @@ struct dlm_lkb {
        unsigned long           lkb_timeout_cs;
 #endif
 
-       struct mutex            lkb_cb_mutex;
+       spinlock_t              lkb_cb_lock;
        struct work_struct      lkb_cb_work;
        struct list_head        lkb_cb_list; /* for ls_cb_delay or proc->asts */
        struct dlm_callback     lkb_callbacks[DLM_CALLBACKS_SIZE];
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 0b1bc24536ce..40e4e4a1c582 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1218,7 +1218,7 @@ static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret,
        INIT_LIST_HEAD(&lkb->lkb_time_list);
 #endif
        INIT_LIST_HEAD(&lkb->lkb_cb_list);
-       mutex_init(&lkb->lkb_cb_mutex);
+       spin_lock_init(&lkb->lkb_cb_lock);
        INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);
 
        idr_preload(GFP_NOFS);
-- 
2.31.1

Reply via email to