From: Håkon Bugge <haakon.bu...@oracle.com>

[ Upstream commit 785167a114855c5aa75efca97000e405c2cc85bf ]

When scheduling delayed work to clean up the cache, if the entry has
already been scheduled for deletion, we adjust the delay instead.

Fixes: 3cf69cc8dbeb ("IB/mlx4: Add CM paravirtualization")
Link: https://lore.kernel.org/r/20200803061941.1139994-7-haakon.bu...@oracle.com
Signed-off-by: Håkon Bugge <haakon.bu...@oracle.com>
Signed-off-by: Jason Gunthorpe <j...@nvidia.com>
Signed-off-by: Sasha Levin <sas...@kernel.org>
---
 drivers/infiniband/hw/mlx4/cm.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 5dc920fe13269..c8c586c78d071 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -309,6 +309,9 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
        if (!sriov->is_going_down) {
                id->scheduled_delete = 1;
                schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+       } else if (id->scheduled_delete) {
+               /* Adjust timeout if already scheduled */
+               mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
        }
        spin_unlock_irqrestore(&sriov->going_down_lock, flags);
        spin_unlock(&sriov->id_map_lock);
-- 
2.25.1



Reply via email to