Pull out common vring attributes from vhost_virtqueue into a new
struct vring_host. This allows reuse of the data definitions between
vhost and the virtio queue when the host-side virtio queue is
introduced. Also, unsigned long is replaced with ulong in a couple
of places.

Signed-off-by: Sjur Brændeland <sjur.brandeland@stericsson.com>
---
 drivers/vhost/net.c         |    4 +-
 drivers/vhost/vhost.c       |  213 +++++++++++++++++++++++--------------------
 drivers/vhost/vhost.h       |   14 +---
 include/linux/virtio_ring.h |   13 +++
 4 files changed, 130 insertions(+), 114 deletions(-)

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 072cbba..8fc1869 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -182,7 +182,7 @@ static void handle_tx(struct vhost_net *net)
                if (unlikely(head < 0))
                        break;
                /* Nothing new?  Wait for eventfd to tell us they refilled. */
-               if (head == vq->num) {
+               if (head == vq->hst.vr.num) {
                        int num_pends;
 
                        wmem = atomic_read(&sock->sk->sk_wmem_alloc);
@@ -329,7 +329,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
                d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
                                      ARRAY_SIZE(vq->iov) - seg, &out,
                                      &in, log, log_num);
-               if (d == vq->num) {
+               if (d == vq->hst.vr.num) {
                        r = 0;
                        goto err;
                }
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 99ac2cb..0a676f1 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -12,6 +12,7 @@
  */
 
 #include <linux/eventfd.h>
+#include <linux/types.h>
 #include <linux/vhost.h>
 #include <linux/virtio_net.h>
 #include <linux/mm.h>
@@ -39,8 +40,10 @@ enum {
 
 static unsigned vhost_zcopy_mask __read_mostly;
 
-#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
-#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
+#define vhost_used_event(vq) ((u16 __user *) \
+       &vq->hst.vr.avail->ring[vq->hst.vr.num])
+#define vhost_avail_event(vq) ((u16 __user *)\
+       &vq->hst.vr.used->ring[vq->hst.vr.num])
 
 static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
                            poll_table *pt)
@@ -57,7 +60,7 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
 {
        struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
 
-       if (!((unsigned long)key & poll->mask))
+       if (!((ulong)key & poll->mask))
                return 0;
 
        vhost_poll_queue(poll);
@@ -75,7 +78,7 @@ void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
 
 /* Init poll structure */
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
-                    unsigned long mask, struct vhost_dev *dev)
+                    ulong mask, struct vhost_dev *dev)
 {
        init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
        init_poll_funcptr(&poll->table, vhost_poll_func);
@@ -89,7 +92,7 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
  * keep a reference to a file until after vhost_poll_stop is called. */
 void vhost_poll_start(struct vhost_poll *poll, struct file *file)
 {
-       unsigned long mask;
+       ulong mask;
 
        mask = file->f_op->poll(file, &poll->table);
        if (mask)
@@ -139,7 +142,7 @@ void vhost_poll_flush(struct vhost_poll *poll)
 
 void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 {
-       unsigned long flags;
+       ulong flags;
 
        spin_lock_irqsave(&dev->work_lock, flags);
        if (list_empty(&work->node)) {
@@ -158,13 +161,13 @@ void vhost_poll_queue(struct vhost_poll *poll)
 static void vhost_vq_reset(struct vhost_dev *dev,
                           struct vhost_virtqueue *vq)
 {
-       vq->num = 1;
-       vq->desc = NULL;
-       vq->avail = NULL;
-       vq->used = NULL;
-       vq->last_avail_idx = 0;
-       vq->avail_idx = 0;
-       vq->last_used_idx = 0;
+       vq->hst.vr.num = 1;
+       vq->hst.vr.desc = NULL;
+       vq->hst.vr.avail = NULL;
+       vq->hst.vr.used = NULL;
+       vq->hst.last_avail_idx = 0;
+       vq->hst.avail_idx = 0;
+       vq->hst.last_used_idx = 0;
        vq->signalled_used = 0;
        vq->signalled_used_valid = false;
        vq->used_flags = 0;
@@ -489,13 +492,13 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
        dev->mm = NULL;
 }
 
-static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
+static int log_access_ok(void __user *log_base, u64 addr, ulong sz)
 {
        u64 a = addr / VHOST_PAGE_SIZE / 8;
 
        /* Make sure 64 bit math will not overflow. */
-       if (a > ULONG_MAX - (unsigned long)log_base ||
-           a + (unsigned long)log_base > ULONG_MAX)
+       if (a > ULONG_MAX - (ulong)log_base ||
+           a + (ulong)log_base > ULONG_MAX)
                return 0;
 
        return access_ok(VERIFY_WRITE, log_base + a,
@@ -513,7 +516,7 @@ static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
 
        for (i = 0; i < mem->nregions; ++i) {
                struct vhost_memory_region *m = mem->regions + i;
-               unsigned long a = m->userspace_addr;
+               ulong a = m->userspace_addr;
                if (m->memory_size > ULONG_MAX)
                        return 0;
                else if (!access_ok(VERIFY_WRITE, (void __user *)a,
@@ -587,22 +590,24 @@ static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
        return vq_memory_access_ok(log_base, mp,
                            vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
                (!vq->log_used || log_access_ok(log_base, vq->log_addr,
-                                       sizeof *vq->used +
-                                       vq->num * sizeof *vq->used->ring + s));
+                                       sizeof *vq->hst.vr.used +
+                                       vq->hst.vr.num *
+                                       sizeof *vq->hst.vr.used->ring + s));
 }
 
 /* Can we start vq? */
 /* Caller should have vq mutex and device mutex */
 int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 {
-       return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
+       return vq_access_ok(vq->dev, vq->hst.vr.num, vq->hst.vr.desc,
+                           vq->hst.vr.avail, vq->hst.vr.used) &&
                vq_log_access_ok(vq->dev, vq, vq->log_base);
 }
 
 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 {
        struct vhost_memory mem, *newmem, *oldmem;
-       unsigned long size = offsetof(struct vhost_memory, regions);
+       ulong size = offsetof(struct vhost_memory, regions);
 
        if (copy_from_user(&mem, m, size))
                return -EFAULT;
@@ -673,7 +678,7 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
                        r = -EINVAL;
                        break;
                }
-               vq->num = s.num;
+               vq->hst.vr.num = s.num;
                break;
        case VHOST_SET_VRING_BASE:
                /* Moving base with an active backend?
@@ -690,13 +695,13 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
                        r = -EINVAL;
                        break;
                }
-               vq->last_avail_idx = s.num;
+               vq->hst.last_avail_idx = s.num;
                /* Forget the cached index value. */
-               vq->avail_idx = vq->last_avail_idx;
+               vq->hst.avail_idx = vq->hst.last_avail_idx;
                break;
        case VHOST_GET_VRING_BASE:
                s.index = idx;
-               s.num = vq->last_avail_idx;
+               s.num = vq->hst.last_avail_idx;
                if (copy_to_user(argp, &s, sizeof s))
                        r = -EFAULT;
                break;
@@ -711,15 +716,15 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
                }
                /* For 32bit, verify that the top 32bits of the user
                   data are set to zero. */
-               if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
-                   (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
-                   (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
+               if ((u64)(ulong)a.desc_user_addr != a.desc_user_addr ||
+                   (u64)(ulong)a.used_user_addr != a.used_user_addr ||
+                   (u64)(ulong)a.avail_user_addr != a.avail_user_addr) {
                        r = -EFAULT;
                        break;
                }
-               if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
-                   (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
-                   (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
+               if ((a.avail_user_addr & (sizeof *vq->hst.vr.avail->ring-1)) ||
+                   (a.used_user_addr & (sizeof *vq->hst.vr.used->ring-1)) ||
+                   (a.log_guest_addr & (sizeof *vq->hst.vr.used->ring-1))) {
                        r = -EINVAL;
                        break;
                }
@@ -728,10 +733,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
                 * If it is not, we don't as size might not have been setup.
                 * We will verify when backend is configured. */
                if (vq->private_data) {
-                       if (!vq_access_ok(d, vq->num,
-                               (void __user *)(unsigned long)a.desc_user_addr,
-                               (void __user *)(unsigned long)a.avail_user_addr,
-                               (void __user *)(unsigned long)a.used_user_addr)) {
+                       if (!vq_access_ok(d, vq->hst.vr.num,
+                           (void __user *)(ulong)a.desc_user_addr,
+                           (void __user *)(ulong)a.avail_user_addr,
+                           (void __user *)(ulong)a.used_user_addr)) {
                                r = -EINVAL;
                                break;
                        }
@@ -739,18 +744,22 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
                        /* Also validate log access for used ring if enabled. */
                        if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
                            !log_access_ok(vq->log_base, a.log_guest_addr,
-                                          sizeof *vq->used +
-                                          vq->num * sizeof *vq->used->ring)) {
+                                          sizeof *vq->hst.vr.used +
+                                          vq->hst.vr.num *
+                                          sizeof *vq->hst.vr.used->ring)) {
                                r = -EINVAL;
                                break;
                        }
                }
 
                vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
-               vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
-               vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
+               vq->hst.vr.desc =
+                       (void __user *)(ulong)a.desc_user_addr;
+               vq->hst.vr.avail =
+                       (void __user *)(ulong)a.avail_user_addr;
                vq->log_addr = a.log_guest_addr;
-               vq->used = (void __user *)(unsigned long)a.used_user_addr;
+               vq->hst.vr.used =
+                       (void __user *)(ulong)a.used_user_addr;
                break;
        case VHOST_SET_VRING_KICK:
                if (copy_from_user(&f, argp, sizeof f)) {
@@ -829,7 +838,7 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
 }
 
 /* Caller must have device mutex */
-long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
+long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, ulong arg)
 {
        void __user *argp = (void __user *)arg;
        struct file *eventfp, *filep = NULL;
@@ -858,13 +867,13 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
                        r = -EFAULT;
                        break;
                }
-               if ((u64)(unsigned long)p != p) {
+               if ((u64)(ulong)p != p) {
                        r = -EFAULT;
                        break;
                }
                for (i = 0; i < d->nvqs; ++i) {
                        struct vhost_virtqueue *vq;
-                       void __user *base = (void __user *)(unsigned long)p;
+                       void __user *base = (void __user *)(ulong)p;
                        vq = d->vqs + i;
                        mutex_lock(&vq->mutex);
                        /* If ring is inactive, will check when it's enabled. */
@@ -932,7 +941,7 @@ static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
  */
 static int set_bit_to_user(int nr, void __user *addr)
 {
-       unsigned long log = (unsigned long)addr;
+       ulong log = (ulong)addr;
        struct page *page;
        void *base;
        int bit = nr + (log % PAGE_SIZE) * 8;
@@ -960,12 +969,12 @@ static int log_write(void __user *log_base,
                return 0;
        write_length += write_address % VHOST_PAGE_SIZE;
        for (;;) {
-               u64 base = (u64)(unsigned long)log_base;
+               u64 base = (u64)(ulong)log_base;
                u64 log = base + write_page / 8;
                int bit = write_page % 8;
-               if ((u64)(unsigned long)log != log)
+               if ((u64)(ulong)log != log)
                        return -EFAULT;
-               r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
+               r = set_bit_to_user(bit, (void __user *)(ulong)log);
                if (r < 0)
                        return r;
                if (write_length <= VHOST_PAGE_SIZE)
@@ -1003,16 +1012,16 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
 static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 {
        void __user *used;
-       if (__put_user(vq->used_flags, &vq->used->flags) < 0)
+       if (__put_user(vq->used_flags, &vq->hst.vr.used->flags) < 0)
                return -EFAULT;
        if (unlikely(vq->log_used)) {
                /* Make sure the flag is seen before log. */
                smp_wmb();
                /* Log used flag write. */
-               used = &vq->used->flags;
+               used = &vq->hst.vr.used->flags;
                log_write(vq->log_base, vq->log_addr +
-                         (used - (void __user *)vq->used),
-                         sizeof vq->used->flags);
+                         (used - (void __user *)vq->hst.vr.used),
+                         sizeof vq->hst.vr.used->flags);
                if (vq->log_ctx)
                        eventfd_signal(vq->log_ctx, 1);
        }
@@ -1021,7 +1030,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 
 static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
 {
-       if (__put_user(vq->avail_idx, vhost_avail_event(vq)))
+       if (__put_user(vq->hst.avail_idx, vhost_avail_event(vq)))
                return -EFAULT;
        if (unlikely(vq->log_used)) {
                void __user *used;
@@ -1030,7 +1039,7 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
                /* Log avail event write */
                used = vhost_avail_event(vq);
                log_write(vq->log_base, vq->log_addr +
-                         (used - (void __user *)vq->used),
+                         (used - (void __user *)vq->hst.vr.used),
                          sizeof *vhost_avail_event(vq));
                if (vq->log_ctx)
                        eventfd_signal(vq->log_ctx, 1);
@@ -1048,7 +1057,7 @@ int vhost_init_used(struct vhost_virtqueue *vq)
        if (r)
                return r;
        vq->signalled_used_valid = false;
-       return get_user(vq->last_used_idx, &vq->used->idx);
+       return get_user(vq->hst.last_used_idx, &vq->hst.vr.used->idx);
 }
 
 static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
@@ -1077,7 +1086,7 @@ static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
                _iov = iov + ret;
                size = reg->memory_size - addr + reg->guest_phys_addr;
                _iov->iov_len = min((u64)len, size);
-               _iov->iov_base = (void __user *)(unsigned long)
+               _iov->iov_base = (void __user *)(ulong)
                        (reg->userspace_addr + addr - reg->guest_phys_addr);
                s += size;
                addr += size;
@@ -1216,22 +1225,23 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
        int ret;
 
        /* Check it isn't doing very strange things with descriptor numbers. */
-       last_avail_idx = vq->last_avail_idx;
-       if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) {
+       last_avail_idx = vq->hst.last_avail_idx;
+       if (unlikely(__get_user(vq->hst.avail_idx, &vq->hst.vr.avail->idx))) {
                vq_err(vq, "Failed to access avail idx at %p\n",
-                      &vq->avail->idx);
+                      &vq->hst.vr.avail->idx);
                return -EFAULT;
        }
 
-       if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
+       if (unlikely((u16)(vq->hst.avail_idx -
+                          last_avail_idx) > vq->hst.vr.num)) {
                vq_err(vq, "Guest moved used index from %u to %u",
-                      last_avail_idx, vq->avail_idx);
+                      last_avail_idx, vq->hst.avail_idx);
                return -EFAULT;
        }
 
        /* If there's nothing new since last we looked, return invalid. */
-       if (vq->avail_idx == last_avail_idx)
-               return vq->num;
+       if (vq->hst.avail_idx == last_avail_idx)
+               return vq->hst.vr.num;
 
        /* Only get avail ring entries after they have been exposed by guest. */
        smp_rmb();
@@ -1239,17 +1249,19 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
        /* Grab the next descriptor number they're advertising, and increment
         * the index we've seen. */
        if (unlikely(__get_user(head,
-                               &vq->avail->ring[last_avail_idx % vq->num]))) {
+                               &vq->hst.vr.avail->ring[last_avail_idx %
+                                                       vq->hst.vr.num]))) {
                vq_err(vq, "Failed to read head: idx %d address %p\n",
                       last_avail_idx,
-                      &vq->avail->ring[last_avail_idx % vq->num]);
+                      &vq->hst.vr.avail->ring[last_avail_idx %
+                                              vq->hst.vr.num]);
                return -EFAULT;
        }
 
        /* If their number is silly, that's an error. */
-       if (unlikely(head >= vq->num)) {
+       if (unlikely(head >= vq->hst.vr.num)) {
                vq_err(vq, "Guest says index %u > %u is available",
-                      head, vq->num);
+                      head, vq->hst.vr.num);
                return -EINVAL;
        }
 
@@ -1261,21 +1273,21 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
        i = head;
        do {
                unsigned iov_count = *in_num + *out_num;
-               if (unlikely(i >= vq->num)) {
+               if (unlikely(i >= vq->hst.vr.num)) {
                        vq_err(vq, "Desc index is %u > %u, head = %u",
-                              i, vq->num, head);
+                              i, vq->hst.vr.num, head);
                        return -EINVAL;
                }
-               if (unlikely(++found > vq->num)) {
+               if (unlikely(++found > vq->hst.vr.num)) {
                        vq_err(vq, "Loop detected: last one at %u "
                               "vq size %u head %u\n",
-                              i, vq->num, head);
+                              i, vq->hst.vr.num, head);
                        return -EINVAL;
                }
-               ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
+               ret = __copy_from_user(&desc, vq->hst.vr.desc + i, sizeof desc);
                if (unlikely(ret)) {
                        vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
-                              i, vq->desc + i);
+                              i, vq->hst.vr.desc + i);
                        return -EFAULT;
                }
                if (desc.flags & VRING_DESC_F_INDIRECT) {
@@ -1319,7 +1331,7 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
        } while ((i = next_desc(&desc)) != -1);
 
        /* On success, increment avail index. */
-       vq->last_avail_idx++;
+       vq->hst.last_avail_idx++;
 
        /* Assume notifications from guest are disabled at this point,
         * if they aren't we would need to update avail_event index. */
@@ -1330,7 +1342,7 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
 {
-       vq->last_avail_idx -= n;
+       vq->hst.last_avail_idx -= n;
 }
 
 /* After we've used one of their buffers, we tell them about it.  We'll then
@@ -1341,7 +1353,7 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
 
        /* The virtqueue contains a ring of used buffers.  Get a pointer to the
         * next entry in that used ring. */
-       used = &vq->used->ring[vq->last_used_idx % vq->num];
+       used = &vq->hst.vr.used->ring[vq->hst.last_used_idx % vq->hst.vr.num];
        if (__put_user(head, &used->id)) {
                vq_err(vq, "Failed to write used id");
                return -EFAULT;
@@ -1352,7 +1364,7 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
        }
        /* Make sure buffer is written before we update index. */
        smp_wmb();
-       if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) {
+       if (__put_user(vq->hst.last_used_idx + 1, &vq->hst.vr.used->idx)) {
                vq_err(vq, "Failed to increment used idx");
                return -EFAULT;
        }
@@ -1362,21 +1374,22 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
                /* Log used ring entry write. */
                log_write(vq->log_base,
                          vq->log_addr +
-                          ((void __user *)used - (void __user *)vq->used),
+                          ((void __user *)used -
+                           (void __user *)vq->hst.vr.used),
                          sizeof *used);
                /* Log used index update. */
                log_write(vq->log_base,
                          vq->log_addr + offsetof(struct vring_used, idx),
-                         sizeof vq->used->idx);
+                         sizeof vq->hst.vr.used->idx);
                if (vq->log_ctx)
                        eventfd_signal(vq->log_ctx, 1);
        }
-       vq->last_used_idx++;
+       vq->hst.last_used_idx++;
        /* If the driver never bothers to signal in a very long while,
         * used index might wrap around. If that happens, invalidate
         * signalled_used index we stored. TODO: make sure driver
         * signals at least once in 2^16 and remove this. */
-       if (unlikely(vq->last_used_idx == vq->signalled_used))
+       if (unlikely(vq->hst.last_used_idx == vq->signalled_used))
                vq->signalled_used_valid = false;
        return 0;
 }
@@ -1389,8 +1402,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
        u16 old, new;
        int start;
 
-       start = vq->last_used_idx % vq->num;
-       used = vq->used->ring + start;
+       start = vq->hst.last_used_idx % vq->hst.vr.num;
+       used = vq->hst.vr.used->ring + start;
        if (__copy_to_user(used, heads, count * sizeof *used)) {
                vq_err(vq, "Failed to write used");
                return -EFAULT;
@@ -1401,11 +1414,12 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
                /* Log used ring entry write. */
                log_write(vq->log_base,
                          vq->log_addr +
-                          ((void __user *)used - (void __user *)vq->used),
+                          ((void __user *)used -
+                           (void __user *)vq->hst.vr.used),
                          count * sizeof *used);
        }
-       old = vq->last_used_idx;
-       new = (vq->last_used_idx += count);
+       old = vq->hst.last_used_idx;
+       new = (vq->hst.last_used_idx += count);
        /* If the driver never bothers to signal in a very long while,
         * used index might wrap around. If that happens, invalidate
         * signalled_used index we stored. TODO: make sure driver
@@ -1422,8 +1436,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
 {
        int start, n, r;
 
-       start = vq->last_used_idx % vq->num;
-       n = vq->num - start;
+       start = vq->hst.last_used_idx % vq->hst.vr.num;
+       n = vq->hst.vr.num - start;
        if (n < count) {
                r = __vhost_add_used_n(vq, heads, n);
                if (r < 0)
@@ -1435,7 +1449,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
 
        /* Make sure buffer is written before we update index. */
        smp_wmb();
-       if (put_user(vq->last_used_idx, &vq->used->idx)) {
+       if (put_user(vq->hst.last_used_idx, &vq->hst.vr.used->idx)) {
                vq_err(vq, "Failed to increment used idx");
                return -EFAULT;
        }
@@ -1443,7 +1457,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
                /* Log used index update. */
                log_write(vq->log_base,
                          vq->log_addr + offsetof(struct vring_used, idx),
-                         sizeof vq->used->idx);
+                         sizeof vq->hst.vr.used->idx);
                if (vq->log_ctx)
                        eventfd_signal(vq->log_ctx, 1);
        }
@@ -1460,12 +1474,12 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
        smp_mb();
 
        if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
-           unlikely(vq->avail_idx == vq->last_avail_idx))
+           unlikely(vq->hst.avail_idx == vq->hst.last_avail_idx))
                return true;
 
        if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
                __u16 flags;
-               if (__get_user(flags, &vq->avail->flags)) {
+               if (__get_user(flags, &vq->hst.vr.avail->flags)) {
                        vq_err(vq, "Failed to get flags");
                        return true;
                }
@@ -1473,7 +1487,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
        }
        old = vq->signalled_used;
        v = vq->signalled_used_valid;
-       new = vq->signalled_used = vq->last_used_idx;
+       new = vq->signalled_used = vq->hst.last_used_idx;
        vq->signalled_used_valid = true;
 
        if (unlikely(!v))
@@ -1525,13 +1539,14 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
                r = vhost_update_used_flags(vq);
                if (r) {
                        vq_err(vq, "Failed to enable notification at %p: %d\n",
-                              &vq->used->flags, r);
+                              &vq->hst.vr.used->flags, r);
                        return false;
                }
        } else {
-               r = vhost_update_avail_event(vq, vq->avail_idx);
+               r = vhost_update_avail_event(vq, vq->hst.avail_idx);
                if (r) {
-                       vq_err(vq, "Failed to update avail event index at %p: %d\n",
+                       vq_err(vq,
+                              "Failed to update avail event index at %p: %d\n",
                               vhost_avail_event(vq), r);
                        return false;
                }
@@ -1539,14 +1554,14 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
        /* They could have slipped one in as we were doing that: make
         * sure it's written, then check again. */
        smp_mb();
-       r = __get_user(avail_idx, &vq->avail->idx);
+       r = __get_user(avail_idx, &vq->hst.vr.avail->idx);
        if (r) {
                vq_err(vq, "Failed to check avail idx at %p: %d\n",
-                      &vq->avail->idx, r);
+                      &vq->hst.vr.avail->idx, r);
                return false;
        }
 
-       return avail_idx != vq->avail_idx;
+       return avail_idx != vq->hst.avail_idx;
 }
 
 /* We don't need to be notified again. */
@@ -1561,7 +1576,7 @@ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
                r = vhost_update_used_flags(vq);
                if (r)
                        vq_err(vq, "Failed to enable notification at %p: %d\n",
-                              &vq->used->flags, r);
+                              &vq->hst.vr.used->flags, r);
        }
 }
 
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 1125af3..4ab8c8f 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -76,10 +76,7 @@ struct vhost_virtqueue {
 
        /* The actual ring of buffers. */
        struct mutex mutex;
-       unsigned int num;
-       struct vring_desc __user *desc;
-       struct vring_avail __user *avail;
-       struct vring_used __user *used;
+       struct vring_host hst;
        struct file *kick;
        struct file *call;
        struct file *error;
@@ -92,15 +89,6 @@ struct vhost_virtqueue {
        /* The routine to call when the Guest pings us, or timeout. */
        vhost_work_fn_t handle_kick;
 
-       /* Last available index we saw. */
-       u16 last_avail_idx;
-
-       /* Caches available index value from user. */
-       u16 avail_idx;
-
-       /* Last index we used. */
-       u16 last_used_idx;
-
        /* Used flags */
        u16 used_flags;
 
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 63c6ea1..7917dac 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -7,6 +7,19 @@
 struct virtio_device;
 struct virtqueue;
 
+struct vring_host {
+       struct vring vr;
+
+       /* Last available index we saw. */
+       u16 last_avail_idx;
+
+       /* Caches available index value from user. */
+       u16 avail_idx;
+
+       /* Last index we used. */
+       u16 last_used_idx;
+};
+
 struct virtqueue *vring_new_virtqueue(unsigned int index,
                                      unsigned int num,
                                      unsigned int vring_align,
-- 
1.7.5.4

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Reply via email to