Re: [PATCH v2 1/4] vhost: Improve vhost_get_avail_idx() with smp_rmb()
On 4/30/24 04:44, Michael S. Tsirkin wrote: On Mon, Apr 29, 2024 at 08:13:57PM +1000, Gavin Shan wrote: From: "Michael S. Tsirkin" All the callers of vhost_get_avail_idx() are concerned to the memory *with* the memory barrier Thanks, will be corrected in v3. barrier, imposed by smp_rmb() to ensure the order of the available ring entry read and avail_idx read. Improve vhost_get_avail_idx() so that smp_rmb() is executed when the avail_idx is advanced. accessed, not advanced. guest advances it. smp_rmb() is executed only when vq->last_avail_idx != vq->avail_idx. I used 'advanced' to indicate the condition. 'accessed' is also correct since the 'advanced' case is included in the 'accessed' case. With it, the callers needn't worry about the memory barrier. No functional change intended. I'd add: As a side benefit, we also validate the index on all paths now, which will hopefully help catch future errors earlier. Note: current code is inconsistent in how it handles errors: some places treat it as an empty ring, others - non empty. This patch does not attempt to change the existing behaviour. Ok, I will integrate this into v3's commit log. Signed-off-by: Michael S. Tsirkin [gshan: repainted vhost_get_avail_idx()] ?repainted? It's just an indicator to say the changes aren't simply copied from [1]. Some follow-up changes are also applied. So it needs to be reviewed. I will drop this in v3. 
Reviewed-by: Gavin Shan Acked-by: Will Deacon --- drivers/vhost/vhost.c | 106 +- 1 file changed, 42 insertions(+), 64 deletions(-) diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 8995730ce0bf..7aa623117aab 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1290,10 +1290,36 @@ static void vhost_dev_unlock_vqs(struct vhost_dev *d) mutex_unlock(&d->vqs[i]->mutex); } -static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq, - __virtio16 *idx) +static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq) { - return vhost_get_avail(vq, *idx, &vq->avail->idx); + __virtio16 idx; + int r; + + r = vhost_get_avail(vq, idx, &vq->avail->idx); + if (unlikely(r < 0)) { + vq_err(vq, "Failed to access available index at %p (%d)\n", + &vq->avail->idx, r); + return r; + } + + /* Check it isn't doing very strange thing with available indexes */ + vq->avail_idx = vhost16_to_cpu(vq, idx); + if (unlikely((u16)(vq->avail_idx - vq->last_avail_idx) > vq->num)) { + vq_err(vq, "Invalid available index change from %u to %u", + vq->last_avail_idx, vq->avail_idx); + return -EINVAL; + } + + /* We're done if there is nothing new */ + if (vq->avail_idx == vq->last_avail_idx) + return 0; + + /* + * We updated vq->avail_idx so we need a memory barrier between + * the index read above and the caller reading avail ring entries. + */ + smp_rmb(); + return 1; } static inline int vhost_get_avail_head(struct vhost_virtqueue *vq, @@ -2498,38 +2524,17 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, { struct vring_desc desc; unsigned int i, head, found = 0; - u16 last_avail_idx; - __virtio16 avail_idx; + u16 last_avail_idx = vq->last_avail_idx; __virtio16 ring_head; int ret, access; - /* Check it isn't doing very strange things with descriptor numbers. 
*/ - last_avail_idx = vq->last_avail_idx; - if (vq->avail_idx == vq->last_avail_idx) { - if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) { - vq_err(vq, "Failed to access avail idx at %p\n", - &vq->avail->idx); - return -EFAULT; - } - vq->avail_idx = vhost16_to_cpu(vq, avail_idx); - - if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { - vq_err(vq, "Guest moved avail index from %u to %u", - last_avail_idx, vq->avail_idx); - return -EFAULT; - } + ret = vhost_get_avail_idx(vq); + if (unlikely(ret < 0)) + return ret; - /* If there's nothing new since last we looked, return - * invalid. - */ - if (vq->avail_idx == last_avail_idx) + if (!ret) return vq->num; - - /* Only get avail ring entries after they have been - * exposed by guest. - */ - smp_rmb(); } /* Grab the next descriptor number they're advertising, and increment @@ -2790,35 +2795,20 @@ EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n); /* return true if we're sure that
Re: [PATCH v2 1/4] vhost: Improve vhost_get_avail_idx() with smp_rmb()
On Mon, Apr 29, 2024 at 08:13:57PM +1000, Gavin Shan wrote: > From: "Michael S. Tsirkin" > > All the callers of vhost_get_avail_idx() are concerned to the memory *with* the memory barrier > barrier, imposed by smp_rmb() to ensure the order of the available > ring entry read and avail_idx read. > > Improve vhost_get_avail_idx() so that smp_rmb() is executed when > the avail_idx is advanced. accessed, not advanced. guest advances it. > With it, the callers needn't worry > about the memory barrier. > > No functional change intended. I'd add: As a side benefit, we also validate the index on all paths now, which will hopefully help catch future errors earlier. Note: current code is inconsistent in how it handles errors: some places treat it as an empty ring, others - non empty. This patch does not attempt to change the existing behaviour. > Signed-off-by: Michael S. Tsirkin > [gshan: repainted vhost_get_avail_idx()] ?repainted? > Reviewed-by: Gavin Shan > Acked-by: Will Deacon > --- > drivers/vhost/vhost.c | 106 +- > 1 file changed, 42 insertions(+), 64 deletions(-) > > diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c > index 8995730ce0bf..7aa623117aab 100644 > --- a/drivers/vhost/vhost.c > +++ b/drivers/vhost/vhost.c > @@ -1290,10 +1290,36 @@ static void vhost_dev_unlock_vqs(struct vhost_dev *d) > mutex_unlock(&d->vqs[i]->mutex); > } > > -static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq, > - __virtio16 *idx) > +static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq) > { > - return vhost_get_avail(vq, *idx, &vq->avail->idx); > + __virtio16 idx; > + int r; > + > + r = vhost_get_avail(vq, idx, &vq->avail->idx); > + if (unlikely(r < 0)) { > + vq_err(vq, "Failed to access available index at %p (%d)\n", > + &vq->avail->idx, r); > + return r; > + } > + > + /* Check it isn't doing very strange thing with available indexes */ > + vq->avail_idx = vhost16_to_cpu(vq, idx); > + if (unlikely((u16)(vq->avail_idx - vq->last_avail_idx) > vq->num)) { > + 
vq_err(vq, "Invalid available index change from %u to %u", > + vq->last_avail_idx, vq->avail_idx); > + return -EINVAL; > + } > + > + /* We're done if there is nothing new */ > + if (vq->avail_idx == vq->last_avail_idx) > + return 0; > + > + /* > + * We updated vq->avail_idx so we need a memory barrier between > + * the index read above and the caller reading avail ring entries. > + */ > + smp_rmb(); > + return 1; > } > > static inline int vhost_get_avail_head(struct vhost_virtqueue *vq, > @@ -2498,38 +2524,17 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, > { > struct vring_desc desc; > unsigned int i, head, found = 0; > - u16 last_avail_idx; > - __virtio16 avail_idx; > + u16 last_avail_idx = vq->last_avail_idx; > __virtio16 ring_head; > int ret, access; > > - /* Check it isn't doing very strange things with descriptor numbers. */ > - last_avail_idx = vq->last_avail_idx; > - > if (vq->avail_idx == vq->last_avail_idx) { > - if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) { > - vq_err(vq, "Failed to access avail idx at %p\n", > - &vq->avail->idx); > - return -EFAULT; > - } > - vq->avail_idx = vhost16_to_cpu(vq, avail_idx); > - > - if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { > - vq_err(vq, "Guest moved avail index from %u to %u", > - last_avail_idx, vq->avail_idx); > - return -EFAULT; > - } > + ret = vhost_get_avail_idx(vq); > + if (unlikely(ret < 0)) > + return ret; > > - /* If there's nothing new since last we looked, return > - * invalid. > - */ > - if (vq->avail_idx == last_avail_idx) > + if (!ret) > return vq->num; > - > - /* Only get avail ring entries after they have been > - * exposed by guest. 
> - */ > - smp_rmb(); > } > > /* Grab the next descriptor number they're advertising, and increment > @@ -2790,35 +2795,20 @@ EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n); > /* return true if we're sure that avaiable ring is empty */ > bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq) > { > - __virtio16 avail_idx; > int r; > > if (vq->avail_idx != vq->last_avail_idx) > return false; > > - r = vhost_get_avail_idx(vq, &avail_idx); > - if (unlikely(r)) > - return false; > - > - vq->avail_idx = vhost16_to_cpu(vq, avail_idx); > - if