To avoid accessing a stale memory region cache after reset, this patch checks the existence of the virtqueue pfn in all exported virtqueue access helpers before trying to use them.
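
For illustration only (not part of this patch), a minimal caller-side sketch of why the guards help: a hypothetical device handler can keep calling the exported helpers after a guest-initiated reset without adding its own vring checks, because the helpers now treat an unconfigured queue as empty or turn the access into a no-op. The handler name and processing steps below are made up for the example.

    #include "hw/virtio/virtio.h"

    /* Hypothetical handler, for illustration only: it relies on the guards
     * added by this patch so that a queue whose vring was torn down by a
     * reset is reported as empty instead of being dereferenced. */
    static void my_dev_handle_output(VirtIODevice *vdev, VirtQueue *vq)
    {
        unsigned int in_bytes, out_bytes;

        if (virtio_queue_empty(vq)) {
            /* Also taken when vring.avail is unset, e.g. right after reset. */
            return;
        }

        virtqueue_get_avail_bytes(vq, &in_bytes, &out_bytes, -1U, -1U);
        /* ... pop and process elements, then fill/flush and notify ... */
    }
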
Cc: Cornelia Huck <cornelia.h...@de.ibm.com>
Cc: Paolo Bonzini <pbonz...@redhat.com>
Signed-off-by: Jason Wang <jasow...@redhat.com>
---
 hw/virtio/virtio.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index efce4b3..76cc81b 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -322,6 +322,10 @@ static int virtio_queue_empty_rcu(VirtQueue *vq)
         return 0;
     }
 
+    if (unlikely(!vq->vring.avail)) {
+        return 0;
+    }
+
     return vring_avail_idx(vq) == vq->last_avail_idx;
 }
 
@@ -333,6 +337,10 @@ int virtio_queue_empty(VirtQueue *vq)
         return 0;
     }
 
+    if (unlikely(!vq->vring.avail)) {
+        return 0;
+    }
+
     rcu_read_lock();
     empty = vring_avail_idx(vq) == vq->last_avail_idx;
     rcu_read_unlock();
@@ -431,6 +439,10 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
         return;
     }
 
+    if (unlikely(!vq->vring.used)) {
+        return;
+    }
+
     idx = (idx + vq->used_idx) % vq->vring.num;
 
     uelem.id = elem->index;
@@ -448,6 +460,10 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count)
         return;
     }
 
+    if (unlikely(!vq->vring.used)) {
+        return;
+    }
+
     /* Make sure buffer is written before we update index. */
     smp_wmb();
     trace_virtqueue_flush(vq, count);
@@ -546,6 +562,11 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
     int64_t len = 0;
     int rc;
 
+    if (unlikely(!vq->vring.desc)) {
+        *in_bytes = *out_bytes = 0;
+        return;
+    }
+
     rcu_read_lock();
     idx = vq->last_avail_idx;
     total_bufs = in_total = out_total = 0;
-- 
2.7.4