On Fri, Mar 24, 2023 at 3:54 AM Eugenio Pérez <epere...@redhat.com> wrote:
>
> Evaluating it at start time instead of initialization time may make the
> guest capable of dynamically adding or removing migration blockers.
>
> Also, moving to initialization reduces the number of ioctls in the
> migration, reducing failure possibilities.
>
> As a drawback we need to check for CVQ isolation twice: one time with no
> MQ negotiated and another one acking it, as long as the device supports
> it. This is because Vring ASID / group management is based on vq
> indexes, but we don't know the index of CVQ before negotiating MQ.
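
For context, the CVQ index depends on feature negotiation because the
virtio-net spec places the control queue after all the data queues. A
minimal sketch of that layout (the helper name below is made up, just for
illustration):

static unsigned virtio_net_cvq_index(uint64_t acked_features,
                                     unsigned max_virtqueue_pairs)
{
    if (acked_features & BIT_ULL(VIRTIO_NET_F_MQ)) {
        return max_virtqueue_pairs * 2; /* data vqs occupy 0..2N-1 */
    }
    return 2;                           /* single pair: rx = 0, tx = 1 */
}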

We need to fail if we see a device that can isolate cvq without MQ
but not with MQ.

>
> Signed-off-by: Eugenio Pérez <epere...@redhat.com>
> ---
> v2: Take out the reset of the device from vhost_vdpa_cvq_is_isolated
> ---
>  net/vhost-vdpa.c | 194 ++++++++++++++++++++++++++++++++++++-----------
>  1 file changed, 151 insertions(+), 43 deletions(-)
>
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index 4397c0d4b3..db2c9afcb3 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -43,6 +43,13 @@ typedef struct VhostVDPAState {
>
>      /* The device always have SVQ enabled */
>      bool always_svq;
> +
> +    /* The device can isolate CVQ in its own ASID if MQ is negotiated */
> +    bool cvq_isolated_mq;
> +
> +    /* The device can isolate CVQ in its own ASID if MQ is not negotiated */
> +    bool cvq_isolated;

As stated above, if we see a device where cvq_isolated_mq ^ cvq_isolated
== true, we need to fail. This may also reduce the complexity of the code?
(A minimal sketch of this is at the end of this mail.)

Thanks

> +
>      bool started;
> } VhostVDPAState;
>
> @@ -361,15 +368,8 @@ static NetClientInfo net_vhost_vdpa_info = {
>      .check_peer_type = vhost_vdpa_check_peer_type,
> };
>
> -/**
> - * Get vring virtqueue group
> - *
> - * @device_fd vdpa device fd
> - * @vq_index Virtqueue index
> - *
> - * Return -errno in case of error, or vq group if success.
> - */
> -static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index)
> +static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
> +                                          Error **errp)
> {
>      struct vhost_vring_state state = {
>          .index = vq_index,
> @@ -378,8 +378,7 @@ static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index)
>
>      if (unlikely(r < 0)) {
>          r = -errno;
> -        error_report("Cannot get VQ %u group: %s", vq_index,
> -                     g_strerror(errno));
> +        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
>          return r;
>      }
>
> @@ -479,9 +478,9 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc)
> {
>      VhostVDPAState *s, *s0;
>      struct vhost_vdpa *v;
> -    uint64_t backend_features;
>      int64_t cvq_group;
> -    int cvq_index, r;
> +    int r;
> +    Error *err = NULL;
>
>      assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
>
> @@ -501,42 +500,29 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc)
>      /*
>       * If we early return in these cases SVQ will not be enabled. The migration
>       * will be blocked as long as vhost-vdpa backends will not offer _F_LOG.
> -     *
> -     * Calling VHOST_GET_BACKEND_FEATURES as they are not available in v->dev
> -     * yet.
>       */
> -    r = ioctl(v->device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
> -    if (unlikely(r < 0)) {
> -        error_report("Cannot get vdpa backend_features: %s(%d)",
> -                     g_strerror(errno), errno);
> -        return -1;
> -    }
> -    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)) ||
> -        !vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
> +    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
>          return 0;
>      }
>
> -    /*
> -     * Check if all the virtqueues of the virtio device are in a different vq
> -     * than the last vq. VQ group of last group passed in cvq_group.
> -     */
> -    cvq_index = v->dev->vq_index_end - 1;
> -    cvq_group = vhost_vdpa_get_vring_group(v->device_fd, cvq_index);
> -    if (unlikely(cvq_group < 0)) {
> -        return cvq_group;
> -    }
> -    for (int i = 0; i < cvq_index; ++i) {
> -        int64_t group = vhost_vdpa_get_vring_group(v->device_fd, i);
> -
> -        if (unlikely(group < 0)) {
> -            return group;
> +    if (v->dev->features & BIT_ULL(VIRTIO_NET_F_MQ)) {
> +        if (!s->cvq_isolated_mq) {
> +            return 0;
>          }
> -
> -        if (group == cvq_group) {
> +    } else {
> +        if (!s->cvq_isolated) {
>              return 0;
>          }
>      }
>
> +    cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
> +                                           v->dev->vq_index_end - 1,
> +                                           &err);
> +    if (unlikely(cvq_group < 0)) {
> +        error_report_err(err);
> +        return cvq_group;
> +    }
> +
>      r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
>      if (unlikely(r < 0)) {
>          return r;
> @@ -798,6 +784,116 @@ static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
>      .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
> };
>
> +/**
> + * Probe the device to check control virtqueue is isolated.
> + *
> + * @device_fd vhost-vdpa file descriptor
> + * @features features to negotiate
> + * @cvq_index Control vq index
> + *
> + * Returns -1 in case of error, 0 if false and 1 if true
> + */
> +static int vhost_vdpa_cvq_is_isolated(int device_fd, uint64_t features,
> +                                      unsigned cvq_index, Error **errp)
> +{
> +    int64_t cvq_group;
> +    int r;
> +
> +    r = vhost_vdpa_set_dev_features_fd(device_fd, features);
> +    if (unlikely(r < 0)) {
> +        error_setg_errno(errp, -r, "Cannot set device features");
> +        return r;
> +    }
> +
> +    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
> +    if (unlikely(cvq_group < 0)) {
> +        return cvq_group;
> +    }
> +
> +    for (int i = 0; i < cvq_index; ++i) {
> +        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
> +
> +        if (unlikely(group < 0)) {
> +            return group;
> +        }
> +
> +        if (group == (int64_t)cvq_group) {
> +            return 0;
> +        }
> +    }
> +
> +    return 1;
> +}
> +
> +/**
> + * Probe if CVQ is isolated when the device is MQ and when it is not MQ
> + *
> + * @device_fd The vdpa device fd
> + * @features Features offered by the device.
> + * @cvq_index The control vq index if mq is negotiated. Ignored
> + *            otherwise.
> + * @cvq_isolated It'll be set to true if cvq is isolated if mq is not
> + *               negotiated.
> + * @cvq_isolated_mq It'll be set to true if cvq is isolated if mq is
> + *                  negotiated.
> + *
> + * Returns -1 in case of failure
> + */
> +static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
> +                                          int cvq_index, bool *cvq_isolated,
> +                                          bool *cvq_isolated_mq, Error **errp)
> +{
> +    uint64_t backend_features;
> +    int r;
> +
> +    ERRP_GUARD();
> +
> +    *cvq_isolated = false;
> +    *cvq_isolated_mq = false;
> +    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
> +    if (unlikely(r < 0)) {
> +        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
> +        return r;
> +    }
> +
> +    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
> +        return 0;
> +    }
> +
> +    r = vhost_vdpa_cvq_is_isolated(device_fd,
> +                                   features & ~BIT_ULL(VIRTIO_NET_F_MQ), 2,
> +                                   errp);
> +    if (unlikely(r < 0)) {
> +        if (r == -ENOTSUP) {
> +            /*
> +             * The kernel report VHOST_BACKEND_F_IOTLB_ASID if the vdpa
> +             * frontend support ASID but the parent driver does not. The CVQ
> +             * cannot be isolated in this case.
> +             */
> +            error_free(*errp);
> +            *errp = NULL;
> +            return 0;
> +        }
> +
> +        return r;
> +    }
> +
> +    *cvq_isolated = r == 1;
> +    vhost_vdpa_reset_status_fd(device_fd);
> +    if (!(features & BIT_ULL(VIRTIO_NET_F_MQ))) {
> +        return 0;
> +    }
> +
> +    r = vhost_vdpa_cvq_is_isolated(device_fd, features, cvq_index * 2, errp);
> +    if (unlikely(r < 0)) {
> +        return r;
> +    }
> +
> +    *cvq_isolated_mq = r == 1;
> +    vhost_vdpa_reset_status_fd(device_fd);
> +    return 0;
> +}
> +
>  static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
>                                             const char *device,
>                                             const char *name,
> @@ -807,16 +903,26 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
>                                             bool is_datapath,
>                                             bool svq,
>                                             struct vhost_vdpa_iova_range iova_range,
> -                                           uint64_t features)
> +                                           uint64_t features,
> +                                           Error **errp)
> {
>      NetClientState *nc = NULL;
>      VhostVDPAState *s;
>      int ret = 0;
>      assert(name);
> +    bool cvq_isolated, cvq_isolated_mq;
> +
>      if (is_datapath) {
>          nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
>                                   name);
>      } else {
> +        ret = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
> +                                             queue_pair_index, &cvq_isolated,
> +                                             &cvq_isolated_mq, errp);
> +        if (unlikely(ret)) {
> +            return NULL;
> +        }
> +
>          nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
>                                           device, name);
>      }
> @@ -843,6 +949,8 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
>
>      s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
>      s->vhost_vdpa.shadow_vq_ops_opaque = s;
> +    s->cvq_isolated = cvq_isolated;
> +    s->cvq_isolated_mq = cvq_isolated_mq;
>
>      /*
>       * TODO: We cannot migrate devices with CVQ as there is no way to set
> @@ -971,7 +1079,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
>      for (i = 0; i < queue_pairs; i++) {
>          ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
>                                       vdpa_device_fd, i, 2, true, opts->x_svq,
> -                                     iova_range, features);
> +                                     iova_range, features, errp);
>          if (!ncs[i])
>              goto err;
>      }
>
> @@ -979,7 +1087,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
>      if (has_cvq) {
>          nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
>                                   vdpa_device_fd, i, 1, false,
> -                                 opts->x_svq, iova_range, features);
> +                                 opts->x_svq, iova_range, features, errp);
>          if (!nc)
>              goto err;
>      }
> --
> 2.31.1
>
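
To make the suggestion above concrete, a rough, untested sketch of how
vhost_vdpa_probe_cvq_isolation() could fail on the mismatch and keep a
single flag. It reuses the helpers from the patch and, for brevity, omits
the VHOST_BACKEND_F_IOTLB_ASID and -ENOTSUP handling the patch already has;
the error message wording is just an example:

static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, bool *cvq_isolated,
                                          Error **errp)
{
    /* Probe without _F_MQ first: CVQ is vq 2 in that configuration */
    int r = vhost_vdpa_cvq_is_isolated(device_fd,
                                       features & ~BIT_ULL(VIRTIO_NET_F_MQ),
                                       2, errp);
    if (unlikely(r < 0)) {
        return r;
    }
    *cvq_isolated = r == 1;
    vhost_vdpa_reset_status_fd(device_fd);

    if (!(features & BIT_ULL(VIRTIO_NET_F_MQ))) {
        return 0;
    }

    /* Probe again with _F_MQ, where CVQ is the last vq */
    r = vhost_vdpa_cvq_is_isolated(device_fd, features, cvq_index * 2, errp);
    if (unlikely(r < 0)) {
        return r;
    }
    vhost_vdpa_reset_status_fd(device_fd);

    /* Reject devices that isolate CVQ in only one of the two cases */
    if (*cvq_isolated ^ (r == 1)) {
        error_setg(errp,
                   "vdpa device isolates CVQ only with or only without _F_MQ");
        return -EINVAL;
    }

    return 0;
}

That way VhostVDPAState would only need the existing cvq_isolated member,
and vhost_vdpa_net_cvq_start() could check a single flag regardless of
whether _F_MQ ends up negotiated.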