Add per-ucontext list tracking for WQ objects. Each WQ is added to
the ucontext's wq_list on creation and removed on destruction. This
enables iterating over all WQs belonging to a ucontext for service
reset cleanup.

Signed-off-by: Long Li <[email protected]>
---
 drivers/infiniband/hw/mana/main.c    |  1 +
 drivers/infiniband/hw/mana/mana_ib.h |  2 ++
 drivers/infiniband/hw/mana/wq.c      | 20 ++++++++++++++++++++
 3 files changed, 23 insertions(+)

diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 214c1d4e1548..e6da5c8400f4 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -244,6 +244,7 @@ int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
        mutex_init(&ucontext->lock);
        INIT_LIST_HEAD(&ucontext->pd_list);
        INIT_LIST_HEAD(&ucontext->cq_list);
+       INIT_LIST_HEAD(&ucontext->wq_list);
 
        mutex_lock(&mdev->ucontext_lock);
        list_add_tail(&ucontext->dev_list, &mdev->ucontext_list);
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 8d3edf7ba335..96b5a13470ae 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -94,6 +94,7 @@ struct mana_ib_wq {
        int wqe;
        u32 wq_buf_size;
        mana_handle_t rx_object;
+       struct list_head ucontext_list;
 };
 
 struct mana_ib_pd {
@@ -207,6 +208,7 @@ struct mana_ib_ucontext {
        struct mutex lock;
        struct list_head pd_list;
        struct list_head cq_list;
+       struct list_head wq_list;
 };
 
 struct mana_ib_rwq_ind_table {
diff --git a/drivers/infiniband/hw/mana/wq.c b/drivers/infiniband/hw/mana/wq.c
index 6206244f762e..1af9869933aa 100644
--- a/drivers/infiniband/hw/mana/wq.c
+++ b/drivers/infiniband/hw/mana/wq.c
@@ -41,6 +41,17 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
        wq->wqe = init_attr->max_wr;
        wq->wq_buf_size = ucmd.wq_buf_size;
        wq->rx_object = INVALID_MANA_HANDLE;
+
+       INIT_LIST_HEAD(&wq->ucontext_list);
+       if (udata) {
+               struct mana_ib_ucontext *mana_ucontext =
+                       rdma_udata_to_drv_context(udata,
+                               struct mana_ib_ucontext, ibucontext);
+               mutex_lock(&mana_ucontext->lock);
+               list_add_tail(&wq->ucontext_list, &mana_ucontext->wq_list);
+               mutex_unlock(&mana_ucontext->lock);
+       }
+
        return &wq->ibwq;
 
 err_free_wq:
@@ -64,6 +75,15 @@ int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
 
        mdev = container_of(ib_dev, struct mana_ib_dev, ib_dev);
 
+       if (udata) {
+               struct mana_ib_ucontext *mana_ucontext =
+                       rdma_udata_to_drv_context(udata,
+                               struct mana_ib_ucontext, ibucontext);
+               mutex_lock(&mana_ucontext->lock);
+               list_del_init(&wq->ucontext_list);
+               mutex_unlock(&mana_ucontext->lock);
+       }
+
        mana_ib_destroy_queue(mdev, &wq->queue);
 
        kfree(wq);
-- 
2.43.0


Reply via email to