On Wed, Oct 13, 2021 at 5:54 AM Jason Wang <jasow...@redhat.com> wrote:
>
>
> On 2021/10/1 at 3:05 PM, Eugenio Pérez wrote:
> > It reports the shadow virtqueue address from qemu virtual address space
>
>
> I think both the title and the commit log need more tweaks. Looking at
> the code, what it actually does is introduce the vring into the svq.
>

Right, this commit evolved a little, gaining more functionality that is
not reflected in the commit message. I will expand it.

>
> >
> > Signed-off-by: Eugenio Pérez <epere...@redhat.com>
> > ---
> >   hw/virtio/vhost-shadow-virtqueue.h |  4 +++
> >   hw/virtio/vhost-shadow-virtqueue.c | 50 ++++++++++++++++++++++++++++++
> >   2 files changed, 54 insertions(+)
> >
> > diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
> > index 237cfceb9c..2df3d117f5 100644
> > --- a/hw/virtio/vhost-shadow-virtqueue.h
> > +++ b/hw/virtio/vhost-shadow-virtqueue.h
> > @@ -16,6 +16,10 @@ typedef struct VhostShadowVirtqueue VhostShadowVirtqueue;
> >
> >   EventNotifier *vhost_svq_get_svq_call_notifier(VhostShadowVirtqueue *svq);
> >   void vhost_svq_set_guest_call_notifier(VhostShadowVirtqueue *svq, int call_fd);
> > +void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
> > +                              struct vhost_vring_addr *addr);
> > +size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq);
> > +size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq);
> >
> >   bool vhost_svq_start(struct vhost_dev *dev, unsigned idx,
> >                        VhostShadowVirtqueue *svq);
> > diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
> > index 3fe129cf63..5c1899f6af 100644
> > --- a/hw/virtio/vhost-shadow-virtqueue.c
> > +++ b/hw/virtio/vhost-shadow-virtqueue.c
> > @@ -18,6 +18,9 @@
> >
> >   /* Shadow virtqueue to relay notifications */
> >   typedef struct VhostShadowVirtqueue {
> > +    /* Shadow vring */
> > +    struct vring vring;
> > +
> >       /* Shadow kick notifier, sent to vhost */
> >       EventNotifier kick_notifier;
> >       /* Shadow call notifier, sent to vhost */
> > @@ -38,6 +41,9 @@ typedef struct VhostShadowVirtqueue {
> >
> >       /* Virtio queue shadowing */
> >       VirtQueue *vq;
> > +
> > +    /* Virtio device */
> > +    VirtIODevice *vdev;
> >   } VhostShadowVirtqueue;
> >
> >   /* Forward guest notifications */
> > @@ -93,6 +99,35 @@ void vhost_svq_set_guest_call_notifier(VhostShadowVirtqueue *svq, int call_fd)
> >       event_notifier_init_fd(&svq->guest_call_notifier, call_fd);
> >   }
> >
> > +/*
> > + * Get the shadow vq vring address.
> > + * @svq Shadow virtqueue
> > + * @addr Destination to store address
> > + */
> > +void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
> > +                              struct vhost_vring_addr *addr)
> > +{
> > +    addr->desc_user_addr = (uint64_t)svq->vring.desc;
> > +    addr->avail_user_addr = (uint64_t)svq->vring.avail;
> > +    addr->used_user_addr = (uint64_t)svq->vring.used;
> > +}
> > +
> > +size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq)
> > +{
> > +    uint16_t vq_idx = virtio_get_queue_index(svq->vq);
> > +    size_t desc_size = virtio_queue_get_desc_size(svq->vdev, vq_idx);
> > +    size_t avail_size = virtio_queue_get_avail_size(svq->vdev, vq_idx);
> > +
> > +    return ROUND_UP(desc_size + avail_size, qemu_real_host_page_size);
>
>
> Is this round up required by the spec?
>

No, I was trying to avoid exposing more QEMU data to the device than
needed: since mappings are done at page granularity, anything allocated
right after the vring would also become visible to the device. I will
expand the comment, but if there are other ways to achieve this, or it
is not needed, please let me know!
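
To illustrate the idea: rounding the driver area up to a whole number of
host pages and allocating it page-aligned means the pages handed to the
device contain only SVQ vring data plus zeroed padding. A minimal sketch
(it just restates what the patch does, with illustrative variable names):

    size_t page_size = qemu_real_host_page_size;
    /* Round desc + avail up to a whole number of host pages */
    size_t driver_size = ROUND_UP(desc_size + avail_size, page_size);
    /* Page-aligned allocation: the mapped pages hold nothing but the
     * vring and its zeroed tail padding, never unrelated QEMU data. */
    void *driver_area = qemu_memalign(page_size, driver_size);
    memset(driver_area, 0, driver_size);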

>
> > +}
> > +
> > +size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq)
> > +{
> > +    uint16_t vq_idx = virtio_get_queue_index(svq->vq);
> > +    size_t used_size = virtio_queue_get_used_size(svq->vdev, vq_idx);
> > +    return ROUND_UP(used_size, qemu_real_host_page_size);
> > +}
> > +
> >   /*
> >    * Restore the vhost guest to host notifier, i.e., disables svq effect.
> >    */
> > @@ -178,6 +213,10 @@ void vhost_svq_stop(struct vhost_dev *dev, unsigned idx,
> >   VhostShadowVirtqueue *vhost_svq_new(struct vhost_dev *dev, int idx)
> >   {
> >       int vq_idx = dev->vq_index + idx;
> > +    unsigned num = virtio_queue_get_num(dev->vdev, vq_idx);
> > +    size_t desc_size = virtio_queue_get_desc_size(dev->vdev, vq_idx);
> > +    size_t driver_size;
> > +    size_t device_size;
> >       g_autofree VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1);
> >       int r;
> >
> > @@ -196,6 +235,15 @@ VhostShadowVirtqueue *vhost_svq_new(struct vhost_dev *dev, int idx)
> >       }
> >
> >       svq->vq = virtio_get_queue(dev->vdev, vq_idx);
> > +    svq->vdev = dev->vdev;
> > +    driver_size = vhost_svq_driver_area_size(svq);
> > +    device_size = vhost_svq_device_area_size(svq);
> > +    svq->vring.num = num;
> > +    svq->vring.desc = qemu_memalign(qemu_real_host_page_size, driver_size);
> > +    svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
> > +    memset(svq->vring.desc, 0, driver_size);
>
>
> Any reason for using a contiguous area for both desc and avail?
>

No special reason; they can be split, but as long as we keep the
page-width padding, placing them in one allocation saves some memory,
IOTLB entries, etc. It's not going to make a big difference, but still.
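
To make the trade-off concrete, roughly this is the layout the patch
ends up with (the diagram is only illustrative):

    /*
     * Driver area, one page-aligned allocation:
     *   +-----------+------------+----------------------+
     *   | desc ring | avail ring | zero padding to page |
     *   +-----------+------------+----------------------+
     *
     * Device area, a separate page-aligned allocation:
     *   +-----------+----------------------+
     *   | used ring | zero padding to page |
     *   +-----------+----------------------+
     */
    svq->vring.desc = qemu_memalign(qemu_real_host_page_size, driver_size);
    svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
    svq->vring.used = qemu_memalign(qemu_real_host_page_size, device_size);

Splitting desc and avail would need a second page-aligned allocation
and, with page-granularity mappings, an extra IOTLB entry plus up to a
page more of padding.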

Thanks!

> Thanks
>
>
> > +    svq->vring.used = qemu_memalign(qemu_real_host_page_size, device_size);
> > +    memset(svq->vring.used, 0, device_size);
> >       event_notifier_set_handler(&svq->call_notifier,
> >                                  vhost_svq_handle_call);
> >       return g_steal_pointer(&svq);
> > @@ -215,5 +263,7 @@ void vhost_svq_free(VhostShadowVirtqueue *vq)
> >       event_notifier_cleanup(&vq->kick_notifier);
> >       event_notifier_set_handler(&vq->call_notifier, NULL);
> >       event_notifier_cleanup(&vq->call_notifier);
> > +    qemu_vfree(vq->vring.desc);
> > +    qemu_vfree(vq->vring.used);
> >       g_free(vq);
> >   }
>

