After a service reset, firmware handles for PD, CQ, WQ, QP, and MR are
set to INVALID_MANA_HANDLE, and user-context doorbells are set to
INVALID_DOORBELL, by the reset notification path.

Check for INVALID_MANA_HANDLE in each destroy callback before issuing
firmware destroy commands. When a handle is invalid, skip the firmware
call and proceed directly to kernel resource cleanup (umem, queues,
memory). This avoids sending stale handles to firmware after reset.

Affected callbacks:
  - mana_ib_dealloc_pd: skip the GDMA_DESTROY_PD firmware request
  - mana_ib_destroy_cq: skip mana_ib_gd_destroy_cq
  - mana_ib_dealloc_ucontext: skip mana_gd_destroy_doorbell_page
  - mana_ib_destroy_qp_rss: skip mana_destroy_wq_obj per WQ
  - mana_ib_destroy_qp_raw: skip mana_destroy_wq_obj
  - mana_ib_dereg_mr: skip mana_ib_gd_destroy_mr

Signed-off-by: Long Li <[email protected]>
---
 drivers/infiniband/hw/mana/cq.c   | 10 ++++++----
 drivers/infiniband/hw/mana/main.c | 12 +++++++++---
 drivers/infiniband/hw/mana/mr.c   |  8 +++++---
 drivers/infiniband/hw/mana/qp.c   |  9 ++++++---
 4 files changed, 26 insertions(+), 13 deletions(-)

diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index b054684b8de7..315301bccb97 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -143,10 +143,12 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 
        mana_ib_remove_cq_cb(mdev, cq);
 
-       /* Ignore return code as there is not much we can do about it.
-        * The error message is printed inside.
-        */
-       mana_ib_gd_destroy_cq(mdev, cq);
+       if (cq->cq_handle != INVALID_MANA_HANDLE) {
+               /* Ignore return code as there is not much we can do about it.
+                * The error message is printed inside.
+                */
+               mana_ib_gd_destroy_cq(mdev, cq);
+       }
 
        mana_ib_destroy_queue(mdev, &cq->queue);
 
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 61ce30aa9cb2..d60205184dba 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -147,6 +147,9 @@ int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
                mutex_unlock(&mana_ucontext->lock);
        }
 
+       if (pd->pd_handle == INVALID_MANA_HANDLE)
+               return 0;
+
        mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req),
                             sizeof(resp));
 
@@ -280,9 +283,12 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
        list_del_init(&mana_ucontext->dev_list);
        mutex_unlock(&mdev->ucontext_lock);
 
-       ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell);
-       if (ret)
-               ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
+       if (mana_ucontext->doorbell != INVALID_DOORBELL) {
+               ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell);
+               if (ret)
+                       ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n",
+                                 ret);
+       }
 }
 
 int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index 7189ccd41576..75bc2a9c366a 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -336,9 +336,11 @@ int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
                mutex_unlock(&mana_ucontext->lock);
        }
 
-       err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);
-       if (err)
-               return err;
+       if (mr->mr_handle != INVALID_MANA_HANDLE) {
+               err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);
+               if (err)
+                       return err;
+       }
 
        if (mr->umem)
                ib_umem_release(mr->umem);
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index d590aca9b93a..76d59addb645 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -846,9 +846,11 @@ static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
        for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
                ibwq = ind_tbl->ind_tbl[i];
                wq = container_of(ibwq, struct mana_ib_wq, ibwq);
-               ibdev_dbg(&mdev->ib_dev, "destroying wq->rx_object %llu\n",
+               ibdev_dbg(&mdev->ib_dev,
+                         "destroying wq->rx_object %llu\n",
                          wq->rx_object);
-               mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
+               if (wq->rx_object != INVALID_MANA_HANDLE)
+                       mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
        }
 
        return 0;
@@ -867,7 +869,8 @@ static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
        mpc = netdev_priv(ndev);
        pd = container_of(ibpd, struct mana_ib_pd, ibpd);
 
-       mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);
+       if (qp->qp_handle != INVALID_MANA_HANDLE)
+               mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);
 
        mana_ib_destroy_queue(mdev, &qp->raw_sq);
 
-- 
2.43.0


Reply via email to