Use the dedicated suspend_vqs() helper instead of open-coding the same loop over the active virtqueues in mlx5_vdpa_suspend().

Reviewed-by: Cosmin Ratiu <cra...@nvidia.com>
Reviewed-by: Eugenio Pérez <epere...@redhat.com>
Signed-off-by: Dragos Tatulea <dtatu...@nvidia.com>
---
 drivers/vdpa/mlx5/net/mlx5_vnet.c | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)
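
Note (not part of the patch): for readers unfamiliar with the helper, below is a minimal sketch of what suspend_vqs() plausibly looks like, assuming it wraps the same per-VQ loop that this patch removes from mlx5_vdpa_suspend(); the exact body in mlx5_vnet.c may differ.

/* Hypothetical sketch of the helper relied on above; the in-tree
 * suspend_vqs() in drivers/vdpa/mlx5/net/mlx5_vnet.c may differ.
 */
static void suspend_vqs(struct mlx5_vdpa_net *ndev)
{
	int i;

	/* Suspend every currently active virtqueue of the device. */
	for (i = 0; i < ndev->cur_num_vqs; i++)
		suspend_vq(ndev, &ndev->vqs[i]);
}

Centralizing the loop in one helper keeps the suspend logic in a single place, so future changes (locking, error handling, VQ accounting) need only touch suspend_vqs() rather than every caller.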

diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 51630b1935f4..eca6f68c2eda 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -3355,17 +3355,12 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
 {
        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
-       struct mlx5_vdpa_virtqueue *mvq;
-       int i;
 
        mlx5_vdpa_info(mvdev, "suspending device\n");
 
        down_write(&ndev->reslock);
        unregister_link_notifier(ndev);
-       for (i = 0; i < ndev->cur_num_vqs; i++) {
-               mvq = &ndev->vqs[i];
-               suspend_vq(ndev, mvq);
-       }
+       suspend_vqs(ndev);
        mlx5_vdpa_cvq_suspend(mvdev);
        mvdev->suspended = true;
        up_write(&ndev->reslock);

-- 
2.45.1

