On Mon, Feb 13, 2023 at 7:51 AM Si-Wei Liu <si-wei....@oracle.com> wrote:
>
>
>
> On 2/8/2023 1:42 AM, Eugenio Pérez wrote:
> > Only create iova_tree if and when it is needed.
> >
> > The cleanup keeps being responsible of last VQ but this change allows it
> > to merge both cleanup functions.
> >
> > Signed-off-by: Eugenio Pérez <epere...@redhat.com>
> > Acked-by: Jason Wang <jasow...@redhat.com>
> > ---
> >  net/vhost-vdpa.c | 99 ++++++++++++++++++++++++++++++++++--------------
> >  1 file changed, 71 insertions(+), 28 deletions(-)
> >
> > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > index de5ed8ff22..a9e6c8f28e 100644
> > --- a/net/vhost-vdpa.c
> > +++ b/net/vhost-vdpa.c
> > @@ -178,13 +178,9 @@ err_init:
> >  static void vhost_vdpa_cleanup(NetClientState *nc)
> >  {
> >      VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> > -    struct vhost_dev *dev = &s->vhost_net->dev;
> >
> >      qemu_vfree(s->cvq_cmd_out_buffer);
> >      qemu_vfree(s->status);
> > -    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
> > -        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
> > -    }
> >      if (s->vhost_net) {
> >          vhost_net_cleanup(s->vhost_net);
> >          g_free(s->vhost_net);
> > @@ -234,10 +230,64 @@ static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
> >      return size;
> >  }
> >
> > +/** From any vdpa net client, get the netclient of first queue pair */
> > +static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
> > +{
> > +    NICState *nic = qemu_get_nic(s->nc.peer);
> > +    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);
> > +
> > +    return DO_UPCAST(VhostVDPAState, nc, nc0);
> > +}
> > +
> > +static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
> > +{
> > +    struct vhost_vdpa *v = &s->vhost_vdpa;
> > +
> > +    if (v->shadow_vqs_enabled) {
> > +        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
> > +                                           v->iova_range.last);
> > +    }
> > +}
> > +
> > +static int vhost_vdpa_net_data_start(NetClientState *nc)
> > +{
> > +    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> > +    struct vhost_vdpa *v = &s->vhost_vdpa;
> > +
> > +    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
> > +
> > +    if (v->index == 0) {
> > +        vhost_vdpa_net_data_start_first(s);
> > +        return 0;
> > +    }
> > +
> > +    if (v->shadow_vqs_enabled) {
> > +        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
> > +        v->iova_tree = s0->vhost_vdpa.iova_tree;
> > +    }
> > +
> > +    return 0;
> > +}
> > +
> > +static void vhost_vdpa_net_client_stop(NetClientState *nc)
> > +{
> > +    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> > +    struct vhost_dev *dev;
> > +
> > +    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
> > +
> > +    dev = s->vhost_vdpa.dev;
> > +    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
> > +        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
> > +    }
> > +}
> > +
> >  static NetClientInfo net_vhost_vdpa_info = {
> >          .type = NET_CLIENT_DRIVER_VHOST_VDPA,
> >          .size = sizeof(VhostVDPAState),
> >          .receive = vhost_vdpa_receive,
> > +        .start = vhost_vdpa_net_data_start,
> > +        .stop = vhost_vdpa_net_client_stop,
> >          .cleanup = vhost_vdpa_cleanup,
> >          .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
> >          .has_ufo = vhost_vdpa_has_ufo,
> > @@ -351,7 +401,7 @@ dma_map_err:
> >
> >  static int vhost_vdpa_net_cvq_start(NetClientState *nc)
> >  {
> > -    VhostVDPAState *s;
> > +    VhostVDPAState *s, *s0;
> >      struct vhost_vdpa *v;
> >      uint64_t backend_features;
> >      int64_t cvq_group;
> > @@ -425,6 +475,15 @@ out:
> >          return 0;
> >      }
> >
> > +    s0 = vhost_vdpa_net_first_nc_vdpa(s);
> > +    if (s0->vhost_vdpa.iova_tree) {
> > +        /* SVQ is already configured for all virtqueues */
> > +        v->iova_tree = s0->vhost_vdpa.iova_tree;
> > +    } else {
> > +        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
> > +                                           v->iova_range.last);
> I wonder how this case could happen, vhost_vdpa_net_data_start_first()
> should've allocated an iova tree on the first data vq. Is zero data vq
> ever possible on net vhost-vdpa?
>

It's the case of the current qemu master when only CVQ is being shadowed.
It's not that there are no data vqs: if that were possible, the CVQ
vhost-vdpa state would be s0 itself. The case is that, since only the CVQ
vhost-vdpa is the one being migrated, only the CVQ has an iova tree.

With this series applied and with no migration running, the situation is
the same as before: only the CVQ gets shadowed. When migration starts, all
vqs get shadowed and they share the iova tree.

Thanks!

> Thanks,
> -Siwei
> > +    }
> > +
> >      r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
> >                                 vhost_vdpa_net_cvq_cmd_page_len(), false);
> >      if (unlikely(r < 0)) {
> > @@ -449,15 +508,9 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
> >      if (s->vhost_vdpa.shadow_vqs_enabled) {
> >          vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
> >          vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
> > -        if (!s->always_svq) {
> > -            /*
> > -             * If only the CVQ is shadowed we can delete this safely.
> > -             * If all the VQs are shadows this will be needed by the time the
> > -             * device is started again to register SVQ vrings and similar.
> > -             */
> > -            g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
> > -        }
> >      }
> > +
> > +    vhost_vdpa_net_client_stop(nc);
> >  }
> >
> >  static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
> > @@ -667,8 +720,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
> >                                             int nvqs,
> >                                             bool is_datapath,
> >                                             bool svq,
> > -                                           struct vhost_vdpa_iova_range iova_range,
> > -                                           VhostIOVATree *iova_tree)
> > +                                           struct vhost_vdpa_iova_range iova_range)
> >  {
> >      NetClientState *nc = NULL;
> >      VhostVDPAState *s;
> > @@ -690,7 +742,6 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
> >      s->vhost_vdpa.shadow_vqs_enabled = svq;
> >      s->vhost_vdpa.iova_range = iova_range;
> >      s->vhost_vdpa.shadow_data = svq;
> > -    s->vhost_vdpa.iova_tree = iova_tree;
> >      if (!is_datapath) {
> >          s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
> >                                                vhost_vdpa_net_cvq_cmd_page_len());
> > @@ -760,7 +811,6 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> >      uint64_t features;
> >      int vdpa_device_fd;
> >      g_autofree NetClientState **ncs = NULL;
> > -    g_autoptr(VhostIOVATree) iova_tree = NULL;
> >      struct vhost_vdpa_iova_range iova_range;
> >      NetClientState *nc;
> >      int queue_pairs, r, i = 0, has_cvq = 0;
> > @@ -812,12 +862,8 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> >          goto err;
> >      }
> >
> > -    if (opts->x_svq) {
> > -        if (!vhost_vdpa_net_valid_svq_features(features, errp)) {
> > -            goto err_svq;
> > -        }
> > -
> > -        iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
> > +    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
> > +        goto err;
> >      }
> >
> >      ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
> > @@ -825,7 +871,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> >      for (i = 0; i < queue_pairs; i++) {
> >          ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
> >                                       vdpa_device_fd, i, 2, true, opts->x_svq,
> > -                                     iova_range, iova_tree);
> > +                                     iova_range);
> >          if (!ncs[i])
> >              goto err;
> >      }
> >
> > @@ -833,13 +879,11 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> >      if (has_cvq) {
> >          nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
> >                                   vdpa_device_fd, i, 1, false,
> > -                                 opts->x_svq, iova_range, iova_tree);
> > +                                 opts->x_svq, iova_range);
> >          if (!nc)
> >              goto err;
> >      }
> >
> > -    /* iova_tree ownership belongs to last NetClientState */
> > -    g_steal_pointer(&iova_tree);
> >      return 0;
> >
> >  err:
> > @@ -849,7 +893,6 @@ err:
> >          }
> >      }
> >
> > -err_svq:
> >      qemu_close(vdpa_device_fd);
> >
> >      return -1;
>
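
In case it helps to see the intended ownership at a glance, below is a
stripped-down sketch of the lifecycle described above. It is not the qemu
code: the types and helpers (iova_tree_t, client_t, tree_new(), tree_free()
and the start/stop functions) are invented stand-ins for illustration only;
the real logic is what the diff adds to net/vhost-vdpa.c.

/*
 * Simplified model of the iova tree lifecycle discussed above.
 * All names here are stand-ins, not the actual qemu symbols.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int dummy; } iova_tree_t;

typedef struct client {
    int vq_index;             /* first vq handled by this client           */
    int nvqs;                 /* number of vqs handled by this client      */
    int vq_index_end;         /* total number of vqs of the device         */
    bool shadow_vqs_enabled;
    iova_tree_t *iova_tree;   /* freed by the client holding the last vq   */
} client_t;

static iova_tree_t *tree_new(void) { return calloc(1, sizeof(iova_tree_t)); }
static void tree_free(iova_tree_t *t) { free(t); }

/* Data vq start: queue pair 0 allocates the tree, the others borrow it. */
static void data_start(client_t *c, client_t *first)
{
    if (!c->shadow_vqs_enabled) {
        return;               /* passthrough data vqs need no tree */
    }
    c->iova_tree = (c == first) ? tree_new() : first->iova_tree;
}

/*
 * CVQ start: reuse the data vqs' tree if it exists (all vqs shadowed),
 * otherwise allocate a private one (only CVQ shadowed).
 */
static void cvq_start(client_t *cvq, client_t *first)
{
    cvq->iova_tree = first->iova_tree ? first->iova_tree : tree_new();
}

/* Stop: only the client holding the device's last vq frees the tree. */
static void client_stop(client_t *c)
{
    if (c->vq_index + c->nvqs == c->vq_index_end) {
        tree_free(c->iova_tree);
    }
    c->iova_tree = NULL;
}

int main(void)
{
    /* One data queue pair (vqs 0-1) plus a CVQ (vq 2), everything shadowed
     * as it would be while a migration is running. */
    client_t data = { .vq_index = 0, .nvqs = 2, .vq_index_end = 3,
                      .shadow_vqs_enabled = true };
    client_t cvq  = { .vq_index = 2, .nvqs = 1, .vq_index_end = 3,
                      .shadow_vqs_enabled = true };

    data_start(&data, &data);
    cvq_start(&cvq, &data);
    printf("shared tree: %s\n", data.iova_tree == cvq.iova_tree ? "yes" : "no");

    client_stop(&data);   /* not the last vq: keeps the tree alive */
    client_stop(&cvq);    /* last vq: frees the shared tree        */
    return 0;
}

The point of the split is that the tree only exists while some vq is
actually shadowed, and the client owning the device's last vq is the single
place that frees it, which is what allows merging the two cleanup paths.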