Implement vdpa vq and device resume if the capability is detected. Add
support for the suspend -> ready state change.

Reviewed-by: Gal Pressman <g...@nvidia.com>
Acked-by: Eugenio PĂ©rez <epere...@redhat.com>
Signed-off-by: Dragos Tatulea <dtatu...@nvidia.com>
---
 drivers/vdpa/mlx5/net/mlx5_vnet.c | 69 +++++++++++++++++++++++++++----
 1 file changed, 62 insertions(+), 7 deletions(-)
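
Note: below is a minimal userspace sketch of the flow this enables, driven
through the vhost-vdpa chardev. The device node path is illustrative and
the sketch assumes a kernel exposing the VHOST_VDPA_SUSPEND/VHOST_VDPA_RESUME
uAPI; when the resume op is cleared at probe time (see below), the resume
ioctl is expected to fail (e.g. with EOPNOTSUPP) rather than reach the driver:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    int main(void)
    {
            /* Path is an example; use the bound vhost-vdpa instance. */
            int fd = open("/dev/vhost-vdpa-0", O_RDWR);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* vq fw state: RDY -> SUSPEND */
            if (ioctl(fd, VHOST_VDPA_SUSPEND))
                    perror("VHOST_VDPA_SUSPEND");

            /* vq fw state: SUSPEND -> RDY (the new transition) */
            if (ioctl(fd, VHOST_VDPA_RESUME))
                    perror("VHOST_VDPA_RESUME");

            return 0;
    }

The capability gating mirrors the desc_group_mkey_supported handling in
mlx5v_probe(): the mgtdev keeps a writable copy of mlx5_vdpa_ops and clears
.resume when the firmware lacks freeze_to_rdy_supported, so the vdpa core
sees an accurate ops table.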

diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 1e08a8805640..f8f088cced50 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1170,7 +1170,12 @@ static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueu
        return err;
 }
 
-static bool is_valid_state_change(int oldstate, int newstate)
+static bool is_resumable(struct mlx5_vdpa_net *ndev)
+{
+       return ndev->mvdev.vdev.config->resume;
+}
+
+static bool is_valid_state_change(int oldstate, int newstate, bool resumable)
 {
        switch (oldstate) {
        case MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT:
@@ -1178,6 +1183,7 @@ static bool is_valid_state_change(int oldstate, int newstate)
        case MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY:
                return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND;
        case MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND:
+               return resumable ? newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY : false;
        case MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR:
        default:
                return false;
@@ -1200,6 +1206,7 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
 {
        int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
        u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {};
+       bool state_change = false;
        void *obj_context;
        void *cmd_hdr;
        void *in;
@@ -1211,9 +1218,6 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
        if (!modifiable_virtqueue_fields(mvq))
                return -EINVAL;
 
-       if (!is_valid_state_change(mvq->fw_state, state))
-               return -EINVAL;
-
        in = kzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;
@@ -1226,17 +1230,29 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
        MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
 
        obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);
-       if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE)
+
+       if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) {
+               if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) {
+                       err = -EINVAL;
+                       goto done;
+               }
+
                MLX5_SET(virtio_net_q_object, obj_context, state, state);
+               state_change = true;
+       }
 
        MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields);
        err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
-       kfree(in);
-       if (!err)
+       if (err)
+               goto done;
+
+       if (state_change)
                mvq->fw_state = state;
 
        mvq->modified_fields = 0;
 
+done:
+       kfree(in);
        return err;
 }
 
@@ -1430,6 +1446,24 @@ static void suspend_vqs(struct mlx5_vdpa_net *ndev)
                suspend_vq(ndev, &ndev->vqs[i]);
 }
 
+static void resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+       if (!mvq->initialized || !is_resumable(ndev))
+               return;
+
+       if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)
+               return;
+
+       if (modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY))
+               mlx5_vdpa_warn(&ndev->mvdev, "modify to resume failed for vq %u\n", mvq->index);
+}
+
+static void resume_vqs(struct mlx5_vdpa_net *ndev)
+{
+       for (int i = 0; i < ndev->mvdev.max_vqs; i++)
+               resume_vq(ndev, &ndev->vqs[i]);
+}
+
 static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
 {
        if (!mvq->initialized)
@@ -3261,6 +3295,23 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
        return 0;
 }
 
+static int mlx5_vdpa_resume(struct vdpa_device *vdev)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev;
+
+       ndev = to_mlx5_vdpa_ndev(mvdev);
+
+       mlx5_vdpa_info(mvdev, "resuming device\n");
+
+       down_write(&ndev->reslock);
+       mvdev->suspended = false;
+       resume_vqs(ndev);
+       register_link_notifier(ndev);
+       up_write(&ndev->reslock);
+       return 0;
+}
+
 static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
                               unsigned int asid)
 {
@@ -3317,6 +3368,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
        .get_vq_dma_dev = mlx5_get_vq_dma_dev,
        .free = mlx5_vdpa_free,
        .suspend = mlx5_vdpa_suspend,
+       .resume = mlx5_vdpa_resume, /* Op disabled if not supported. */
 };
 
 static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
@@ -3688,6 +3740,9 @@ static int mlx5v_probe(struct auxiliary_device *adev,
        if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, desc_group_mkey_supported))
                mgtdev->vdpa_ops.get_vq_desc_group = NULL;
 
+       if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, freeze_to_rdy_supported))
+               mgtdev->vdpa_ops.resume = NULL;
+
        err = vdpa_mgmtdev_register(&mgtdev->mgtdev);
        if (err)
                goto reg_err;
-- 
2.43.0

