From: Leonid Ravich <leonid.rav...@emc.com>

Use an RCU-protected hlist for the tgtport association list, to remove
locking from nvmet_fc_find_target_queue(), which is called per IO.

Signed-off-by: Leonid Ravich <leonid.rav...@emc.com>
---
 drivers/nvme/target/fc.c | 54 ++++++++++++++++++++++++++++--------------------
 1 file changed, 32 insertions(+), 22 deletions(-)

diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index cd4e73aa9807..3928a17d073c 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -105,7 +105,7 @@ struct nvmet_fc_tgtport {
        struct list_head                ls_rcv_list;
        struct list_head                ls_req_list;
        struct list_head                ls_busylist;
-       struct list_head                assoc_list;
+       struct hlist_head               assoc_list;
        struct list_head                host_list;
        struct ida                      assoc_cnt;
        struct nvmet_fc_port_entry      *pe;
@@ -163,10 +163,11 @@ struct nvmet_fc_tgt_assoc {
        struct nvmet_fc_tgtport         *tgtport;
        struct nvmet_fc_hostport        *hostport;
        struct nvmet_fc_ls_iod          *rcv_disconn;
-       struct list_head                a_list;
+       struct hlist_node               a_list;
        struct nvmet_fc_tgt_queue       *queues[NVMET_NR_QUEUES + 1];
        struct kref                     ref;
        struct work_struct              del_work;
+       struct rcu_head         rcu_head;
 };
 
 
@@ -965,24 +966,23 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
        struct nvmet_fc_tgt_queue *queue;
        u64 association_id = nvmet_fc_getassociationid(connection_id);
        u16 qid = nvmet_fc_getqueueid(connection_id);
-       unsigned long flags;
 
        if (qid > NVMET_NR_QUEUES)
                return NULL;
 
-       spin_lock_irqsave(&tgtport->lock, flags);
-       list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
                if (association_id == assoc->association_id) {
                        queue = assoc->queues[qid];
                        if (queue &&
                            (!atomic_read(&queue->connected) ||
                             !nvmet_fc_tgt_q_get(queue)))
                                queue = NULL;
-                       spin_unlock_irqrestore(&tgtport->lock, flags);
+                       rcu_read_unlock();
                        return queue;
                }
        }
-       spin_unlock_irqrestore(&tgtport->lock, flags);
+       rcu_read_unlock();
        return NULL;
 }
 
@@ -1118,7 +1118,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 
        assoc->tgtport = tgtport;
        assoc->a_id = idx;
-       INIT_LIST_HEAD(&assoc->a_list);
+       INIT_HLIST_NODE(&assoc->a_list);
        kref_init(&assoc->ref);
        INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
        atomic_set(&assoc->terminating, 0);
@@ -1129,7 +1129,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 
                spin_lock_irqsave(&tgtport->lock, flags);
                needrandom = false;
-               list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
+               hlist_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
                        if (ran == tmpassoc->association_id) {
                                needrandom = true;
                                break;
@@ -1137,7 +1137,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
                }
                if (!needrandom) {
                        assoc->association_id = ran;
-                       list_add_tail(&assoc->a_list, &tgtport->assoc_list);
+                       hlist_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
                }
                spin_unlock_irqrestore(&tgtport->lock, flags);
        }
@@ -1153,6 +1153,17 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
        return NULL;
 }
 
+/* RCU callback: log and free the association after the grace period. */
+static void nvmet_assoc_free_queue_rcu(struct rcu_head *rcu_head) {
+       struct nvmet_fc_tgt_assoc *assoc =
+               container_of(rcu_head, struct nvmet_fc_tgt_assoc, rcu_head);
+       struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+
+       /* log BEFORE kfree: original read assoc->a_id after freeing it (UAF) */
+       dev_info(tgtport->dev,
+               "{%d:%d} Association freed\n",
+               tgtport->fc_target_port.port_num, assoc->a_id);
+       kfree(assoc);
+}
+
 static void
 nvmet_fc_target_assoc_free(struct kref *ref)
 {
@@ -1167,17 +1178,14 @@ nvmet_fc_target_assoc_free(struct kref *ref)
 
        nvmet_fc_free_hostport(assoc->hostport);
        spin_lock_irqsave(&tgtport->lock, flags);
-       list_del(&assoc->a_list);
+       hlist_del_rcu(&assoc->a_list);
        oldls = assoc->rcv_disconn;
        spin_unlock_irqrestore(&tgtport->lock, flags);
        /* if pending Rcv Disconnect Association LS, send rsp now */
        if (oldls)
                nvmet_fc_xmt_ls_rsp(tgtport, oldls);
        ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
-       dev_info(tgtport->dev,
-               "{%d:%d} Association freed\n",
-               tgtport->fc_target_port.port_num, assoc->a_id);
-       kfree(assoc);
+       call_rcu(&assoc->rcu_head, nvmet_assoc_free_queue_rcu);
        nvmet_fc_tgtport_put(tgtport);
 }
 
@@ -1237,7 +1245,7 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
        unsigned long flags;
 
        spin_lock_irqsave(&tgtport->lock, flags);
-       list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+       hlist_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
                if (association_id == assoc->association_id) {
                        ret = assoc;
                        if (!nvmet_fc_tgt_a_get(assoc))
@@ -1397,7 +1405,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
        INIT_LIST_HEAD(&newrec->ls_rcv_list);
        INIT_LIST_HEAD(&newrec->ls_req_list);
        INIT_LIST_HEAD(&newrec->ls_busylist);
-       INIT_LIST_HEAD(&newrec->assoc_list);
+       INIT_HLIST_HEAD(&newrec->assoc_list);
        INIT_LIST_HEAD(&newrec->host_list);
        kref_init(&newrec->ref);
        ida_init(&newrec->assoc_cnt);
@@ -1473,11 +1481,12 @@ nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
 static void
 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
 {
-       struct nvmet_fc_tgt_assoc *assoc, *next;
+       struct nvmet_fc_tgt_assoc *assoc;
+       struct hlist_node *next;
        unsigned long flags;
 
        spin_lock_irqsave(&tgtport->lock, flags);
-       list_for_each_entry_safe(assoc, next,
+       hlist_for_each_entry_safe(assoc, next,
                                &tgtport->assoc_list, a_list) {
                if (!nvmet_fc_tgt_a_get(assoc))
                        continue;
@@ -1522,12 +1531,13 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
                        void *hosthandle)
 {
        struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
-       struct nvmet_fc_tgt_assoc *assoc, *next;
+       struct nvmet_fc_tgt_assoc *assoc;
+       struct hlist_node *next;
        unsigned long flags;
        bool noassoc = true;
 
        spin_lock_irqsave(&tgtport->lock, flags);
-       list_for_each_entry_safe(assoc, next,
+       hlist_for_each_entry_safe(assoc, next,
                                &tgtport->assoc_list, a_list) {
                if (!assoc->hostport ||
                    assoc->hostport->hosthandle != hosthandle)
@@ -1569,7 +1579,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
                spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
 
                spin_lock_irqsave(&tgtport->lock, flags);
-               list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+               hlist_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
                        queue = assoc->queues[0];
                        if (queue && queue->nvme_sq.ctrl == ctrl) {
                                if (nvmet_fc_tgt_a_get(assoc))
-- 
2.16.2

Reply via email to