From cae4f92de97038b442e06d4d33ba4d36d0754c7b Mon Sep 17 00:00:00 2001
From: Fernando Guzman Lugo <x0095...@ti.com>
Date: Fri, 19 Mar 2010 14:04:28 -0600
Subject: [PATCH] DSPBRIDGE: replace sync_enter/leave_cs for tasklets with spin_lock_bh

This patch replaces the sync_enter_cs()/sync_leave_cs() calls made from
tasklet (DPC) context with spin_lock_bh()/spin_unlock_bh() on spinlocks
embedded in the owning objects, and drops the now-unneeded
sync_initialize_dpccs()/sync_delete_cs() setup and teardown.

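For illustration only, the conversion follows the pattern sketched below
(a minimal example; the struct and function names here are hypothetical,
while spin_lock_init()/spin_lock_bh()/spin_unlock_bh() are the standard
kernel primitives this patch switches to):

    #include <linux/spinlock.h>
    #include <linux/list.h>

    /* Hypothetical object touched from both process and tasklet context */
    struct example_obj {
            spinlock_t lock;                /* replaces struct sync_csobject * */
            struct list_head notify_list;
    };

    static void example_init(struct example_obj *obj)
    {
            /* spin_lock_init() cannot fail, unlike sync_initialize_dpccs(),
             * so no error path or matching delete call is needed. */
            spin_lock_init(&obj->lock);
            INIT_LIST_HEAD(&obj->notify_list);
    }

    static void example_touch(struct example_obj *obj)
    {
            /* The _bh variants disable softirqs, so a tasklet on this CPU
             * cannot preempt the critical section and deadlock on the lock. */
            spin_lock_bh(&obj->lock);
            /* ... walk or modify obj->notify_list ... */
            spin_unlock_bh(&obj->lock);
    }

Because the spinlock needs no allocation or teardown, the corresponding
sync_delete_cs() calls and their error handling are removed as well.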
Signed-off-by: Fernando Guzman Lugo <x0095...@ti.com>
---
 arch/arm/plat-omap/include/dspbridge/_chnl_sm.h |    2 +-
 drivers/dsp/bridge/services/ntfy.c              |   34 +++++++++-----------
 drivers/dsp/bridge/wmd/_msg_sm.h                |    2 +-
 drivers/dsp/bridge/wmd/chnl_sm.c                |   25 ++++++--------
 drivers/dsp/bridge/wmd/msg_sm.c                 |   39 ++++++++++-------------
 5 files changed, 45 insertions(+), 57 deletions(-)

diff --git a/arch/arm/plat-omap/include/dspbridge/_chnl_sm.h b/arch/arm/plat-omap/include/dspbridge/_chnl_sm.h
index 3c6f891..d961578 100644
--- a/arch/arm/plat-omap/include/dspbridge/_chnl_sm.h
+++ b/arch/arm/plat-omap/include/dspbridge/_chnl_sm.h
@@ -116,7 +116,7 @@ struct chnl_mgr {
        u32 dw_output_mask;     /* Host output channels w/ full buffers */
        u32 dw_last_output;     /* Last output channel fired from DPC */
        /* Critical section object handle */
-       struct sync_csobject *hcs_obj;
+       spinlock_t chnl_mgr_lock;
        u32 word_size;          /* Size in bytes of DSP word */
        u32 max_channels;       /* Total number of channels */
        u32 open_channels;      /* Total number of open channels */
diff --git a/drivers/dsp/bridge/services/ntfy.c b/drivers/dsp/bridge/services/ntfy.c
index 6eb222f..de7ee0c 100644
--- a/drivers/dsp/bridge/services/ntfy.c
+++ b/drivers/dsp/bridge/services/ntfy.c
@@ -44,7 +44,7 @@
 struct ntfy_object {
        u32 dw_signature;       /* For object validation */
        struct lst_list *notify_list;   /* List of notifier objects */
-       struct sync_csobject *sync_obj; /* For critical sections */
+       spinlock_t ntfy_lock;   /* For critical sections */
 };
 
 /*
@@ -84,20 +84,18 @@ dsp_status ntfy_create(struct ntfy_object **phNtfy)
        MEM_ALLOC_OBJECT(notify_obj, struct ntfy_object, NTFY_SIGNATURE);
 
        if (notify_obj) {
+               spin_lock_init(&notify_obj->ntfy_lock);
 
-               status = sync_initialize_dpccs(&notify_obj->sync_obj);
-               if (DSP_SUCCEEDED(status)) {
-                       notify_obj->notify_list =
-                           mem_calloc(sizeof(struct lst_list), MEM_NONPAGED);
-                       if (notify_obj->notify_list == NULL) {
-                               (void)sync_delete_cs(notify_obj->sync_obj);
-                               MEM_FREE_OBJECT(notify_obj);
-                               status = DSP_EMEMORY;
-                       } else {
-                               INIT_LIST_HEAD(&notify_obj->notify_list->head);
-                               *phNtfy = notify_obj;
-                       }
+               notify_obj->notify_list = mem_calloc(sizeof(struct lst_list),
+                                                       MEM_NONPAGED);
+               if (!notify_obj->notify_list) {
+                       MEM_FREE_OBJECT(notify_obj);
+                       status = DSP_EMEMORY;
+               } else {
+                       INIT_LIST_HEAD(&notify_obj->notify_list->head);
+                       *phNtfy = notify_obj;
                }
+
        } else {
                status = DSP_EMEMORY;
        }
@@ -129,8 +127,6 @@ void ntfy_delete(struct ntfy_object *ntfy_obj)
                DBC_ASSERT(LST_IS_EMPTY(ntfy_obj->notify_list));
                kfree(ntfy_obj->notify_list);
        }
-       if (ntfy_obj->sync_obj)
-               (void)sync_delete_cs(ntfy_obj->sync_obj);
 
        MEM_FREE_OBJECT(ntfy_obj);
 }
@@ -173,7 +169,7 @@ void ntfy_notify(struct ntfy_object *ntfy_obj, u32 event_mask)
         *  event_mask events.
         */
 
-       (void)sync_enter_cs(ntfy_obj->sync_obj);
+       spin_lock_bh(&ntfy_obj->ntfy_lock);
 
        notifier_obj = (struct notifier *)lst_first(ntfy_obj->notify_list);
        while (notifier_obj != NULL) {
@@ -189,7 +185,7 @@ void ntfy_notify(struct ntfy_object *ntfy_obj, u32 event_mask)
                                                notifier_obj);
        }
 
-       (void)sync_leave_cs(ntfy_obj->sync_obj);
+       spin_unlock_bh(&ntfy_obj->ntfy_lock);
 }
 
 /*
@@ -223,7 +219,7 @@ dsp_status ntfy_register(struct ntfy_object *ntfy_obj,
        if (DSP_FAILED(status))
                return status;
 
-       (void)sync_enter_cs(ntfy_obj->sync_obj);
+       spin_lock_bh(&ntfy_obj->ntfy_lock);
 
        notifier_obj = (struct notifier *)lst_first(ntfy_obj->notify_list);
        while (notifier_obj != NULL) {
@@ -281,7 +277,7 @@ dsp_status ntfy_register(struct ntfy_object *ntfy_obj,
                        notifier_obj->event_mask = event_mask;
                }
        }
-       (void)sync_leave_cs(ntfy_obj->sync_obj);
+       spin_unlock_bh(&ntfy_obj->ntfy_lock);
        return status;
 }
 
diff --git a/drivers/dsp/bridge/wmd/_msg_sm.h b/drivers/dsp/bridge/wmd/_msg_sm.h
index fb9a19d..bb28d4f 100644
--- a/drivers/dsp/bridge/wmd/_msg_sm.h
+++ b/drivers/dsp/bridge/wmd/_msg_sm.h
@@ -86,7 +86,7 @@ struct msg_mgr {
 
        struct io_mgr *hio_mgr; /* IO manager */
        struct lst_list *queue_list;    /* List of MSG_QUEUEs */
-       struct sync_csobject *sync_cs;  /* For critical sections */
+       spinlock_t msg_mgr_lock;        /* For critical sections */
        /* Signalled when MsgFrame is available */
        struct sync_object *sync_event;
        struct lst_list *msg_free_list; /* Free MsgFrames ready to be filled */
diff --git a/drivers/dsp/bridge/wmd/chnl_sm.c b/drivers/dsp/bridge/wmd/chnl_sm.c
index 0c14bc2..e0a363c 100644
--- a/drivers/dsp/bridge/wmd/chnl_sm.c
+++ b/drivers/dsp/bridge/wmd/chnl_sm.c
@@ -169,7 +169,7 @@ func_cont:
         * If DPC is scheduled in process context (iosm_schedule) and any
         * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
         * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
-       sync_enter_cs(chnl_mgr_obj->hcs_obj);
+       spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
        omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
        if (pchnl->chnl_type == CHNL_PCPY) {
                /* This is a processor-copy channel. */
@@ -231,7 +231,7 @@ func_cont:
 
        }
        omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
-       sync_leave_cs(chnl_mgr_obj->hcs_obj);
+       spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
        if (mb_val != 0)
                io_intr_dsp2(chnl_mgr_obj->hio_mgr, mb_val);
 
@@ -274,7 +274,7 @@ dsp_status bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
 
        /*  Mark this channel as cancelled, to prevent further IORequests or
         *  IORequests or dispatching. */
-       sync_enter_cs(chnl_mgr_obj->hcs_obj);
+       spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
        pchnl->dw_state |= CHNL_STATECANCEL;
        if (LST_IS_EMPTY(pchnl->pio_requests))
                goto func_cont;
@@ -304,7 +304,7 @@ dsp_status bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
                }
        }
 func_cont:
-       sync_leave_cs(chnl_mgr_obj->hcs_obj);
+       spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
 func_end:
        return status;
 }
@@ -342,7 +342,9 @@ func_cont:
                pchnl->dw_signature = 0x0000;
                /* Free the slot in the channel manager: */
                pchnl->chnl_mgr_obj->ap_channel[pchnl->chnl_id] = NULL;
+               spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
                pchnl->chnl_mgr_obj->open_channels -= 1;
+               spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
                if (pchnl->ntfy_obj) {
                        ntfy_delete(pchnl->ntfy_obj);
                        pchnl->ntfy_obj = NULL;
@@ -424,9 +426,7 @@ dsp_status bridge_chnl_create(OUT struct chnl_mgr **phChnlMgr,
                        chnl_mgr_obj->dw_last_output = 0;
                        chnl_mgr_obj->hdev_obj = hdev_obj;
                        if (DSP_SUCCEEDED(status))
-                               status =
-                                   sync_initialize_dpccs
-                                   (&chnl_mgr_obj->hcs_obj);
+                               spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
                } else {
                        status = DSP_EMEMORY;
                }
@@ -466,9 +466,6 @@ dsp_status bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr)
                                dev_dbg(bridge, "%s: Error status 0x%x\n",
                                        __func__, status);
                }
-               /* release critical section */
-               if (chnl_mgr_obj->hcs_obj)
-                       sync_delete_cs(chnl_mgr_obj->hcs_obj);
 
                /* Free channel manager object: */
                kfree(chnl_mgr_obj->ap_channel);
@@ -631,7 +628,7 @@ dsp_status bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 dwTimeOut,
                }
        }
        /* See comment in AddIOReq */
-       sync_enter_cs(pchnl->chnl_mgr_obj->hcs_obj);
+       spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
        omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
        if (dequeue_ioc) {
                /* Dequeue IOC and set pIOC; */
@@ -684,7 +681,7 @@ dsp_status bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 dwTimeOut,
                sync_reset_event(pchnl->sync_event);
        }
        omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
-       sync_leave_cs(pchnl->chnl_mgr_obj->hcs_obj);
+       spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
        if (dequeue_ioc
            && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
                if (!(ioc.pbuf < (void *)USERMODE_ADDR))
@@ -889,9 +886,9 @@ dsp_status bridge_chnl_open(OUT struct chnl_object **phChnl,
        } else {
                /* Insert channel object in channel manager: */
                chnl_mgr_obj->ap_channel[pchnl->chnl_id] = pchnl;
-               sync_enter_cs(chnl_mgr_obj->hcs_obj);
+               spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
                chnl_mgr_obj->open_channels++;
-               sync_leave_cs(chnl_mgr_obj->hcs_obj);
+               spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
                /* Return result... */
                pchnl->dw_signature = CHNL_SIGNATURE;
                pchnl->dw_state = CHNL_STATEREADY;
diff --git a/drivers/dsp/bridge/wmd/msg_sm.c b/drivers/dsp/bridge/wmd/msg_sm.c
index 4b65b8b..73f4e6b 100644
--- a/drivers/dsp/bridge/wmd/msg_sm.c
+++ b/drivers/dsp/bridge/wmd/msg_sm.c
@@ -95,14 +95,12 @@ dsp_status bridge_msg_create(OUT struct msg_mgr **phMsgMgr,
                        INIT_LIST_HEAD(&msg_mgr_obj->queue_list->head);
                        INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list->head);
                        INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list->head);
-                       status = sync_initialize_dpccs(&msg_mgr_obj->sync_cs);
+                       spin_lock_init(&msg_mgr_obj->msg_mgr_lock);
                }
 
                /*  Create an event to be used by bridge_msg_put() in waiting
                 *  for an available free frame from the message manager. */
-               if (DSP_SUCCEEDED(status))
-                       status =
-                           sync_open_event(&msg_mgr_obj->sync_event, NULL);
+               status = sync_open_event(&msg_mgr_obj->sync_event, NULL);
 
                if (DSP_SUCCEEDED(status))
                        *phMsgMgr = msg_mgr_obj;
@@ -182,7 +180,7 @@ dsp_status bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
 
        if (DSP_SUCCEEDED(status)) {
                /* Enter critical section */
-               (void)sync_enter_cs(hmsg_mgr->sync_cs);
+               spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
                /* Initialize message frames and put in appropriate queues */
                for (i = 0; i < max_msgs && DSP_SUCCEEDED(status); i++) {
                        status = add_new_msg(hmsg_mgr->msg_free_list);
@@ -205,7 +203,7 @@ dsp_status bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
 
                }
                /* Exit critical section */
-               (void)sync_leave_cs(hmsg_mgr->sync_cs);
+               spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
        } else {
                delete_msg_queue(msg_q, 0);
        }
@@ -248,7 +246,7 @@ void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
                io_msg_pend = msg_queue_obj->io_msg_pend;
        }
        /* Remove message queue from hmsg_mgr->queue_list */
-       (void)sync_enter_cs(hmsg_mgr->sync_cs);
+       spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
        lst_remove_elem(hmsg_mgr->queue_list,
                        (struct list_head *)msg_queue_obj);
        /* Free the message queue object */
@@ -258,7 +256,7 @@ void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
        if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
                sync_reset_event(hmsg_mgr->sync_event);
 func_cont:
-       (void)sync_leave_cs(hmsg_mgr->sync_cs);
+       spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
 func_end:
        return;
 }
@@ -290,7 +288,7 @@ dsp_status bridge_msg_get(struct msg_queue *msg_queue_obj,
        }
 
        /* Enter critical section */
-       (void)sync_enter_cs(hmsg_mgr->sync_cs);
+       spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
        /* If a message is already there, get it */
        if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list)) {
                msg_frame_obj = (struct msg_frame *)
@@ -312,7 +310,7 @@ dsp_status bridge_msg_get(struct msg_queue *msg_queue_obj,
 
        }
        /* Exit critical section */
-       (void)sync_leave_cs(hmsg_mgr->sync_cs);
+       spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
        if (DSP_SUCCEEDED(status) && !got_msg) {
                /*  Wait til message is available, timeout, or done. We don't
                 *  have to schedule the DPC, since the DSP will send messages
@@ -322,11 +320,11 @@ dsp_status bridge_msg_get(struct msg_queue *msg_queue_obj,
                status = sync_wait_on_multiple_events(syncs, 2, utimeout,
                                                      &index);
                /* Enter critical section */
-               (void)sync_enter_cs(hmsg_mgr->sync_cs);
+               spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
                if (msg_queue_obj->done) {
                        msg_queue_obj->io_msg_pend--;
                        /* Exit critical section */
-                       (void)sync_leave_cs(hmsg_mgr->sync_cs);
+                       spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
                        /*  Signal that we're not going to access msg_queue_obj
                         *  anymore, so it can be deleted. */
                        (void)sync_set_event(msg_queue_obj->sync_done_ack);
@@ -354,7 +352,7 @@ dsp_status bridge_msg_get(struct msg_queue *msg_queue_obj,
                                sync_set_event(msg_queue_obj->sync_event);
 
                        /* Exit critical section */
-                       (void)sync_leave_cs(hmsg_mgr->sync_cs);
+                       spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
                }
        }
 func_end:
@@ -386,7 +384,7 @@ dsp_status bridge_msg_put(struct msg_queue *msg_queue_obj,
                goto func_end;
        }
 
-       (void)sync_enter_cs(hmsg_mgr->sync_cs);
+       spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
 
        /* If a message frame is available, use it */
        if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
@@ -405,7 +403,7 @@ dsp_status bridge_msg_put(struct msg_queue *msg_queue_obj,
                        sync_reset_event(hmsg_mgr->sync_event);
 
                /* Release critical section before scheduling DPC */
-               (void)sync_leave_cs(hmsg_mgr->sync_cs);
+               spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
                /* Schedule a DPC, to do the actual data transfer: */
                iosm_schedule(hmsg_mgr->hio_mgr);
        } else {
@@ -414,7 +412,7 @@ dsp_status bridge_msg_put(struct msg_queue *msg_queue_obj,
                else
                        msg_queue_obj->io_msg_pend++;
 
-               (void)sync_leave_cs(hmsg_mgr->sync_cs);
+               spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
        }
        if (DSP_SUCCEEDED(status) && !put_msg) {
                /* Wait til a free message frame is available, timeout,
@@ -426,11 +424,11 @@ dsp_status bridge_msg_put(struct msg_queue *msg_queue_obj,
                if (DSP_FAILED(status))
                        goto func_end;
                /* Enter critical section */
-               (void)sync_enter_cs(hmsg_mgr->sync_cs);
+               spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
                if (msg_queue_obj->done) {
                        msg_queue_obj->io_msg_pend--;
                        /* Exit critical section */
-                       (void)sync_leave_cs(hmsg_mgr->sync_cs);
+                       spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
                        /*  Signal that we're not going to access msg_queue_obj
                         *  anymore, so it can be deleted. */
                        (void)sync_set_event(msg_queue_obj->sync_done_ack);
@@ -467,7 +465,7 @@ dsp_status bridge_msg_put(struct msg_queue *msg_queue_obj,
                                sync_set_event(hmsg_mgr->sync_event);
 func_cont:
                        /* Exit critical section */
-                       (void)sync_leave_cs(hmsg_mgr->sync_cs);
+                       spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
                }
        }
 func_end:
@@ -578,9 +576,6 @@ static void delete_msg_mgr(struct msg_mgr *hmsg_mgr)
        if (hmsg_mgr->sync_event)
                sync_close_event(hmsg_mgr->sync_event);
 
-       if (hmsg_mgr->sync_cs)
-               sync_delete_cs(hmsg_mgr->sync_cs);
-
        MEM_FREE_OBJECT(hmsg_mgr);
 func_end:
        return;
-- 
1.6.0.4
