Introduce a host-side virtio queue implementation. The functions
vring_new_host_virtqueue(), virtqueue_add_buf_to_used() and
virtqueue_next_avail_desc() are added to virtio_ring_host.c.

Signed-off-by: Sjur Brændeland <sjur.brandel...@stericsson.com>
---
 drivers/virtio/virtio_ring_host.c |  195 +++++++++++++++++++++++++++++++++++++
 include/linux/virtio.h            |    2 +
 include/linux/virtio_ring.h       |   23 +++++
 3 files changed, 220 insertions(+), 0 deletions(-)

diff --git a/drivers/virtio/virtio_ring_host.c 
b/drivers/virtio/virtio_ring_host.c
index 0750099..570e11e 100644
--- a/drivers/virtio/virtio_ring_host.c
+++ b/drivers/virtio/virtio_ring_host.c
@@ -19,10 +19,32 @@
 #include <linux/module.h>
 #include <linux/uaccess.h>
 #include <linux/kconfig.h>
+#include <linux/slab.h>
 
 MODULE_LICENSE("GPL");
 
 
+struct vring_host_virtqueue {
+       struct virtqueue vq;
+
+       /* Actual memory layout for this queue */
+       struct vring_host vring;
+
+       /* Other side has made a mess, don't try any more. */
+       bool broken;
+};
+
+#define to_vvq(_vq) container_of(_vq, struct vring_host_virtqueue, vq)
+
+#define BAD_RING(_vq, fmt, args...)                            \
+       do {                                                    \
+               dev_err(&_vq->vq.vdev->dev,                     \
+                       "%s:"fmt, (_vq)->vq.name, ##args);      \
+               (_vq)->broken = true;                           \
+       } while (0)
+#define START_USE(vq)
+#define END_USE(vq)
+
 static inline struct vring_used_elem *_vring_add_used(struct vring_host *vh,
                                                      u32 head, u32 len,
                                                      bool (*cpy)(void *dst,
@@ -148,3 +170,176 @@ unsigned vring_next_desc(struct vring_desc *desc)
        return next;
 }
 EXPORT_SYMBOL(vring_next_desc);
+
+struct virtqueue *vring_new_host_virtqueue(unsigned int index,
+                                     unsigned int num,
+                                     unsigned int vring_align,
+                                     struct virtio_device *vdev,
+                                     bool weak_barriers,
+                                     void *pages,
+                                     void (*notify)(struct virtqueue *),
+                                     void (*callback)(struct virtqueue *),
+                                     const char *name)
+{
+       struct vring_host_virtqueue *vq;
+
+       /* We assume num is a power of 2. */
+       if (num & (num - 1)) {
+               dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
+               return NULL;
+       }
+
+       vq = kmalloc(sizeof(*vq), GFP_KERNEL);
+       if (!vq)
+               return NULL;
+
+       vring_init(&vq->vring.vr, num, pages, vring_align);
+       vq->vq.callback = callback;
+       vq->vq.vdev = vdev;
+       vq->vq.name = name;
+       vq->vq.num_free = num;
+       vq->vq.index = index;
+       vq->vq.weak_barriers = weak_barriers;
+       vq->vq.notify = notify;
+       vq->broken = false;
+       vq->vq.reversed = true;
+       list_add_tail(&vq->vq.list, &vdev->vqs);
+       /* FIXME: What about no callback, should we tell pair not to bother us? */
+       return &vq->vq;
+}
+EXPORT_SYMBOL_GPL(vring_new_host_virtqueue);
+
+static inline bool _kernel_cpy_to(void  *dst, void *src, size_t s)
+{
+       memcpy(dst, src, s);
+       return true;
+}
+
+static inline bool _kernel_get(u16 *dst, u16 *src)
+{
+       *dst = *src;
+       return true;
+}
+
+static inline void _read_barrier(void)
+{
+       rmb();
+}
+
+/**
+ * virtqueue_next_avail_desc - get the next available descriptor
+ * @_vq: the struct virtqueue we're talking about
+ * @head: output; set to the index of the found descriptor, or -1 on error
+ *
+ * Look for the next available descriptor in the available ring.
+ * Return NULL if nothing new in the available.
+ */
+struct vring_desc *virtqueue_next_avail_desc(struct virtqueue *_vq,
+                                                   int *head)
+{
+       struct vring_host_virtqueue *vq = to_vvq(_vq);
+       struct vring_desc *desc = NULL;
+       int hd = -1;
+
+       BUG_ON(!vq->vq.reversed);
+       if (unlikely(vq->broken))
+               goto out;
+
+       START_USE(vq);
+       virtio_rmb(vq);
+
+       hd = _vring_avail_desc(&vq->vring, _kernel_get, _read_barrier);
+       if (unlikely(hd < 0)) {
+               BAD_RING(vq, "Bad available descriptor avail:%d last:%d\n",
+                        vq->vring.avail_idx, vq->vring.last_avail_idx);
+               goto out;
+       }
+       if (likely(hd >= vq->vring.vr.num))
+               goto out;
+
+       desc = &vq->vring.vr.desc[hd];
+       vq->vring.last_avail_idx++;
+out:
+       *head = hd;
+       END_USE(vq);
+       return desc;
+}
+EXPORT_SYMBOL(virtqueue_next_avail_desc);
+
+/**
+ * virtqueue_next_linked_desc - get next linked descriptor from the ring
+ * @_vq: the struct virtqueue we're talking about
+ * @desc: "current" descriptor
+ *
+ * Each buffer in the virtqueues is a chain of descriptors. This
+ * function returns the next descriptor in the chain, or NULL if we're at
+ * the end.
+ *
+ * NOTE(review): the original comment claimed this increments
+ * vq->last_avail_idx, but the body below does not - confirm intent.
+ */
+struct vring_desc *virtqueue_next_linked_desc(struct virtqueue *_vq,
+                                             struct vring_desc *desc)
+{
+       struct vring_host_virtqueue *vq = to_vvq(_vq);
+       unsigned int next;
+
+       BUG_ON(!vq->vq.reversed);
+       START_USE(vq);
+       next = vring_next_desc(desc);
+
+       if (next >= vq->vring.vr.num)
+               desc = NULL;
+       else
+               desc = &vq->vring.vr.desc[next];
+       END_USE(vq);
+       return desc;
+}
+EXPORT_SYMBOL(virtqueue_next_linked_desc);
+
+/**
+ * virtqueue_add_buf_to_used - release a used descriptor
+ * @_vq: the struct virtqueue we're talking about
+ * @head: index of the descriptor to be released
+ * @len: length recorded in the used-ring entry (bytes written, per virtio spec)
+ *
+ * The function releases a used descriptor in a reversed ring
+ */
+int virtqueue_add_buf_to_used(struct virtqueue *_vq,
+                                    unsigned int head, int len)
+{
+       struct vring_host_virtqueue *vq = to_vvq(_vq);
+       struct vring_used_elem  *used;
+       int used_idx, err = -EINVAL;
+
+       BUG_ON(!vq->vq.reversed);
+       START_USE(vq);
+
+       if (unlikely(vq->broken))
+               goto err;
+
+       if (unlikely(head >= vq->vring.vr.num)) {
+               BAD_RING(vq, "Invalid head index (%u) > max desc idx (%u) ",
+                        head, vq->vring.vr.num - 1);
+               goto err;
+       }
+
+       /*
+        * The virtqueue contains a ring of used buffers.  Get a pointer to the
+        * next entry in that used ring.
+        */
+       used_idx = (vq->vring.vr.used->idx & (vq->vring.vr.num - 1));
+       used = &vq->vring.vr.used->ring[used_idx];
+       used->id = head;
+       used->len = len;
+
+       /* Make sure buffer is written before we update index. */
+       virtio_wmb(vq);
+       ++vq->vring.vr.used->idx;
+       err = 0;
+err:
+       END_USE(vq);
+       return err;
+
+}
+EXPORT_SYMBOL(virtqueue_add_buf_to_used);
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index f513ba8..3ec2132 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -20,6 +20,7 @@
  * @index: the zero-based ordinal number for this queue.
  * @num_free: number of elements we expect to be able to fit.
  * @weak_barriers: indicate if we can use weak memory barriers.
+ * @reversed: indicate a reversed direction, i.e. a host-side virtio-ring
  *
  * A note on @num_free: with indirect buffers, each buffer needs one
  * element in the queue, otherwise a buffer will need one element per
@@ -35,6 +36,7 @@ struct virtqueue {
        unsigned int num_free;
        void *priv;
        bool weak_barriers;
+       bool reversed;
 };
 
 int virtqueue_add_buf(struct virtqueue *vq,
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 1a4023b..01c0f59 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -42,6 +42,29 @@ int vring_avail_desc_user(struct vring_host *vh);
 struct vring_used_elem *vring_add_used_user(struct vring_host *vh,
                                            unsigned int head, int len);
 
+unsigned vring_next_desc(struct vring_desc *desc);
+struct vring_desc *virtqueue_next_linked_desc(struct virtqueue *_vq,
+                                             struct vring_desc *desc);
+
+struct vring_desc *virtqueue_next_avail_desc(struct virtqueue *_vq,
+                                            int *head);
+
+struct virtqueue *vring_new_host_virtqueue(unsigned int index,
+                                          unsigned int num,
+                                          unsigned int vring_align,
+                                          struct virtio_device *vdev,
+                                          bool weak_barriers,
+                                          void *pages,
+                                          void (*notify)(struct virtqueue *),
+                                          void (*callback)(struct virtqueue *),
+                                          const char *name);
+
+
+int virtqueue_add_buf_to_used(struct virtqueue *_vq,
+                             unsigned int head, int len);
+
+
+
 /* virtio guest is communicating with a virtual "device" that actually runs on
  * a host processor.  Memory barriers are used to control SMP effects. */
 #ifdef CONFIG_SMP
-- 
1.7.5.4

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Reply via email to