The mutex is named as if it protects only the mkey, but in reality it is a
global lock for all MR resources.

Shift the mutex to its rightful location (struct mlx5_vdpa_dev) and
give it a more appropriate name.
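
For illustration, a minimal sketch of the resulting locking pattern (based on
the hunks below; the surrounding code is elided and shown only as an example):

	struct mlx5_vdpa_dev *mvdev = ...;

	mutex_lock(&mvdev->mr_mtx);	/* device-wide lock serializing MR access */
	/* ... create/destroy the mkey, update mr state ... */
	mutex_unlock(&mvdev->mr_mtx);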

Signed-off-by: Dragos Tatulea <dtatu...@nvidia.com>
---
 drivers/vdpa/mlx5/core/mlx5_vdpa.h |  4 ++--
 drivers/vdpa/mlx5/core/mr.c        | 13 +++++++------
 drivers/vdpa/mlx5/core/resources.c |  6 +++---
 3 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 01d4ee58ccb1..9c6ac42c21e1 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -34,8 +34,6 @@ struct mlx5_vdpa_mr {
        /* state of dvq mr */
        bool initialized;
 
-       /* serialize mkey creation and destruction */
-       struct mutex mkey_mtx;
        bool user_mr;
 };
 
@@ -94,6 +92,8 @@ struct mlx5_vdpa_dev {
        u32 generation;
 
        struct mlx5_vdpa_mr mr;
+       /* serialize mr access */
+       struct mutex mr_mtx;
        struct mlx5_control_vq cvq;
        struct workqueue_struct *wq;
        unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 6f29e8eaabb1..abd6a6fb122f 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -509,11 +509,11 @@ static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_
 void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev,
                          struct mlx5_vdpa_mr *mr)
 {
-       mutex_lock(&mr->mkey_mtx);
+       mutex_lock(&mvdev->mr_mtx);
 
        _mlx5_vdpa_destroy_mr(mvdev, mr);
 
-       mutex_unlock(&mr->mkey_mtx);
+       mutex_unlock(&mvdev->mr_mtx);
 }
 
 void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
@@ -550,9 +550,10 @@ int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
 {
        int err;
 
-       mutex_lock(&mvdev->mr.mkey_mtx);
+       mutex_lock(&mvdev->mr_mtx);
        err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
-       mutex_unlock(&mvdev->mr.mkey_mtx);
+       mutex_unlock(&mvdev->mr_mtx);
+
        return err;
 }
 
@@ -563,14 +564,14 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
        int err = 0;
 
        *change_map = false;
-       mutex_lock(&mr->mkey_mtx);
+       mutex_lock(&mvdev->mr_mtx);
        if (mr->initialized) {
                mlx5_vdpa_info(mvdev, "memory map update\n");
                *change_map = true;
        }
        if (!*change_map)
                err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
-       mutex_unlock(&mr->mkey_mtx);
+       mutex_unlock(&mvdev->mr_mtx);
 
        return err;
 }
diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c
index d5a59c9035fb..5c5a41b64bfc 100644
--- a/drivers/vdpa/mlx5/core/resources.c
+++ b/drivers/vdpa/mlx5/core/resources.c
@@ -256,7 +256,7 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
                mlx5_vdpa_warn(mvdev, "resources already allocated\n");
                return -EINVAL;
        }
-       mutex_init(&mvdev->mr.mkey_mtx);
+       mutex_init(&mvdev->mr_mtx);
        res->uar = mlx5_get_uars_page(mdev);
        if (IS_ERR(res->uar)) {
                err = PTR_ERR(res->uar);
@@ -301,7 +301,7 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
 err_uctx:
        mlx5_put_uars_page(mdev, res->uar);
 err_uars:
-       mutex_destroy(&mvdev->mr.mkey_mtx);
+       mutex_destroy(&mvdev->mr_mtx);
        return err;
 }
 
@@ -318,6 +318,6 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
        dealloc_pd(mvdev, res->pdn, res->uid);
        destroy_uctx(mvdev, res->uid);
        mlx5_put_uars_page(mvdev->mdev, res->uar);
-       mutex_destroy(&mvdev->mr.mkey_mtx);
+       mutex_destroy(&mvdev->mr_mtx);
        res->valid = false;
 }
-- 
2.41.0
