Re: [PATCH v8 08/50] virtio: memory access APIs

2014-12-02 Thread Prabhakar Lad
Hi Michael,

Thanks for the patch.

On Mon, Dec 1, 2014 at 4:03 PM, Michael S. Tsirkin  wrote:
> virtio 1.0 makes all memory structures LE, so
[Snip]
> +/*
> + * Low-level memory accessors for handling virtio in modern little endian and in
> + * compatibility native endian format.
> + */
> +
> +static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val)
> +{
> +   if (little_endian)
> +   return le16_to_cpu((__force __le16)val);
> +   else
> +   return (__force u16)val;
> +}
> +
> +static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
> +{
> +   if (little_endian)
> +   return (__force __virtio16)cpu_to_le16(val);
> +   else
> +   return (__force __virtio16)val;
> +}
> +
> +static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val)
> +{
> +   if (little_endian)
> +   return le32_to_cpu((__force __le32)val);
> +   else
> +   return (__force u32)val;
> +}
> +
> +static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val)
> +{
> +   if (little_endian)
> +   return (__force __virtio32)cpu_to_le32(val);
> +   else
> +   return (__force __virtio32)val;
> +}
> +
> +static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val)
> +{
> +   if (little_endian)
> +   return le64_to_cpu((__force __le64)val);
> +   else
> +   return (__force u64)val;
> +}
> +
> +static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val)
> +{
> +   if (little_endian)
> +   return (__force __virtio64)cpu_to_le64(val);
> +   else
> +   return (__force __virtio64)val;
> +}
> +

Nitpicking, could remove the else for the all above functions and
align the return appropriately ?

Apart from that patch looks good.

Acked-by: Lad, Prabhakar 

Thanks,
--Prabhakar Lad
___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization


[PATCH v8 08/50] virtio: memory access APIs

2014-12-01 Thread Michael S. Tsirkin
virtio 1.0 makes all memory structures LE, so
we need APIs to conditionally do a byteswap on BE
architectures.

To make it easier to check code statically,
add virtio specific types for multi-byte integers
in memory.

Add low level wrappers that do a byteswap conditionally, these will be
useful e.g. for vhost.  Add high level wrappers that
query device endian-ness and act accordingly.

Signed-off-by: Michael S. Tsirkin 
Reviewed-by: Cornelia Huck 
---
 include/linux/virtio_byteorder.h  | 59 +++
 include/linux/virtio_config.h | 32 +
 include/uapi/linux/virtio_ring.h  | 45 ++---
 include/uapi/linux/virtio_types.h | 46 ++
 include/uapi/linux/Kbuild |  1 +
 5 files changed, 161 insertions(+), 22 deletions(-)
 create mode 100644 include/linux/virtio_byteorder.h
 create mode 100644 include/uapi/linux/virtio_types.h

diff --git a/include/linux/virtio_byteorder.h b/include/linux/virtio_byteorder.h
new file mode 100644
index 0000000..51865d0
--- /dev/null
+++ b/include/linux/virtio_byteorder.h
@@ -0,0 +1,59 @@
+#ifndef _LINUX_VIRTIO_BYTEORDER_H
+#define _LINUX_VIRTIO_BYTEORDER_H
+#include 
+#include 
+
+/*
+ * Low-level memory accessors for handling virtio in modern little endian and in
+ * compatibility native endian format.
+ */
+
+static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val)
+{
+   if (little_endian)
+   return le16_to_cpu((__force __le16)val);
+   else
+   return (__force u16)val;
+}
+
+static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
+{
+   if (little_endian)
+   return (__force __virtio16)cpu_to_le16(val);
+   else
+   return (__force __virtio16)val;
+}
+
+static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val)
+{
+   if (little_endian)
+   return le32_to_cpu((__force __le32)val);
+   else
+   return (__force u32)val;
+}
+
+static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val)
+{
+   if (little_endian)
+   return (__force __virtio32)cpu_to_le32(val);
+   else
+   return (__force __virtio32)val;
+}
+
+static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val)
+{
+   if (little_endian)
+   return le64_to_cpu((__force __le64)val);
+   else
+   return (__force u64)val;
+}
+
+static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val)
+{
+   if (little_endian)
+   return (__force __virtio64)cpu_to_le64(val);
+   else
+   return (__force __virtio64)val;
+}
+
+#endif /* _LINUX_VIRTIO_BYTEORDER */
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index f517884..02f0acb 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -4,6 +4,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 /**
@@ -199,6 +200,37 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
return 0;
 }
 
+/* Memory accessors */
+static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
+{
+   return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
+{
+   return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
+{
+   return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
+{
+   return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
+{
+   return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
+{
+   return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
 /* Config space accessors. */
 #define virtio_cread(vdev, structname, member, ptr)\
do {\
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index a99f9b7..61c818a 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -32,6 +32,7 @@
  *
  * Copyright Rusty Russell IBM Corporation 2007. */
 #include 
+#include 
 
 /* This marks a buffer as continuing via the next field. */
 #define VRING_DESC_F_NEXT  1
@@ -61,32 +62,32 @@
 /* Virtio ring descriptors: 16 bytes.  These can chain together via "next". */
 struct vring_desc {
/* Address (guest-physical). */
-   __u64 addr;
+   __virtio64 addr;
/* Length. */
-   __u32 len;
+   __virtio32 le