From: Alexander Graf <[email protected]>

KCSAN reports data races when accessing virtio ring fields that are
concurrently written by the device (host). These are legitimate
concurrent accesses where the CPU reads fields that the device updates
via DMA-like mechanisms.
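
As an illustration (a hypothetical polling sketch, not code from this
driver), a plain load of a device-written field gives the compiler
license to fuse repeated reads or hoist the load out of a loop:

	/* Plain load: the compiler may hoist it and spin forever. */
	while (used->idx == last_seen)
		;

	/* Annotated: forces one fresh, untorn load per iteration. */
	while (READ_ONCE(used->idx) == last_seen)
		;

Here 'used' and 'last_seen' are stand-ins for the real ring state.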

Add accessor functions that use READ_ONCE() to properly annotate these
device-writable fields and prevent compiler optimizations that could in
theory break the code. This also serves as documentation showing which
fields are shared with the device.
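
For reference, READ_ONCE() essentially boils down to a volatile access
(see include/asm-generic/rwonce.h), roughly:

	#define READ_ONCE(x)  (*(const volatile typeof(x) *)&(x))

which forbids the compiler from tearing, fusing, or caching the load,
while still permitting the concurrent device write.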

The affected fields are:
- Split ring: used->idx, used->ring[].id, used->ring[].len
- Packed ring: desc[].flags, desc[].id, desc[].len

Signed-off-by: Alexander Graf <[email protected]>
[jth: Add READ_ONCE in virtqueue_kick_prepare_split]
Co-developed-by: Johannes Thumshirn <[email protected]>
Signed-off-by: Johannes Thumshirn <[email protected]>

---
Changes since v1:
- Updated comments (mst, agraf)
- Moved _read suffix to prefix in newly introduced functions (mst)
- Update my minor contribution to Co-developed-by (agraf)
- Add "in theory" to changelog
---
 drivers/virtio/virtio_ring.c | 69 ++++++++++++++++++++++++++++--------
 1 file changed, 54 insertions(+), 15 deletions(-)

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ddab68959671..66802d11d30e 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -222,6 +222,48 @@ struct vring_virtqueue {
 #endif
 };
 
+/*
+ * Accessors for device-writable fields in virtio rings.
+ * These fields are concurrently written by the device and read by the driver.
+ * Use READ_ONCE() to prevent compiler optimizations, document the
+ * intentional data race, and avoid KCSAN warnings.
+ */
+static inline u16 vring_read_used_idx(const struct vring_virtqueue *vq)
+{
+       return virtio16_to_cpu(vq->vq.vdev,
+                              READ_ONCE(vq->split.vring.used->idx));
+}
+
+static inline u32 vring_read_used_id(const struct vring_virtqueue *vq, u16 idx)
+{
+       return virtio32_to_cpu(vq->vq.vdev,
+                              READ_ONCE(vq->split.vring.used->ring[idx].id));
+}
+
+static inline u32 vring_read_used_len(const struct vring_virtqueue *vq, u16 idx)
+{
+       return virtio32_to_cpu(vq->vq.vdev,
+                              READ_ONCE(vq->split.vring.used->ring[idx].len));
+}
+
+static inline u16 vring_read_packed_desc_flags(const struct vring_virtqueue *vq,
+                                              u16 idx)
+{
+       return le16_to_cpu(READ_ONCE(vq->packed.vring.desc[idx].flags));
+}
+
+static inline u16 vring_read_packed_desc_id(const struct vring_virtqueue *vq,
+                                           u16 idx)
+{
+       return le16_to_cpu(READ_ONCE(vq->packed.vring.desc[idx].id));
+}
+
+static inline u32 vring_read_packed_desc_len(const struct vring_virtqueue *vq,
+                                            u16 idx)
+{
+       return le32_to_cpu(READ_ONCE(vq->packed.vring.desc[idx].len));
+}
+
 static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
 static void vring_free(struct virtqueue *_vq);
 
@@ -736,9 +778,10 @@ static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
        LAST_ADD_TIME_INVALID(vq);
 
        if (vq->event) {
-               needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
-                                       vring_avail_event(&vq->split.vring)),
-                                             new, old);
+               u16 event = virtio16_to_cpu(_vq->vdev,
+                               READ_ONCE(vring_avail_event(&vq->split.vring)));
+
+               needs_kick = vring_need_event(event, new, old);
        } else {
                needs_kick = !(vq->split.vring.used->flags &
                                        cpu_to_virtio16(_vq->vdev,
@@ -808,8 +851,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
 
 static bool more_used_split(const struct vring_virtqueue *vq)
 {
-       return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
-                       vq->split.vring.used->idx);
+       return vq->last_used_idx != vring_read_used_idx(vq);
 }
 
 static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
@@ -838,10 +880,8 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
        virtio_rmb(vq->weak_barriers);
 
        last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
-       i = virtio32_to_cpu(_vq->vdev,
-                       vq->split.vring.used->ring[last_used].id);
-       *len = virtio32_to_cpu(_vq->vdev,
-                       vq->split.vring.used->ring[last_used].len);
+       i = vring_read_used_id(vq, last_used);
+       *len = vring_read_used_len(vq, last_used);
 
        if (unlikely(i >= vq->split.vring.num)) {
                BAD_RING(vq, "id %u out of range\n", i);
@@ -923,8 +963,7 @@ static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned int last_used_i
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
 
-       return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
-                       vq->split.vring.used->idx);
+       return (u16)last_used_idx != vring_read_used_idx(vq);
 }
 
 static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
@@ -1701,10 +1740,10 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
 static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
                                       u16 idx, bool used_wrap_counter)
 {
-       bool avail, used;
        u16 flags;
+       bool avail, used;
 
-       flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
+       flags = vring_read_packed_desc_flags(vq, idx);
        avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
        used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
 
@@ -1751,8 +1790,8 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
        last_used_idx = READ_ONCE(vq->last_used_idx);
        used_wrap_counter = packed_used_wrap_counter(last_used_idx);
        last_used = packed_last_used(last_used_idx);
-       id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
-       *len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
+       id = vring_read_packed_desc_id(vq, last_used);
+       *len = vring_read_packed_desc_len(vq, last_used);
 
        if (unlikely(id >= vq->packed.vring.num)) {
                BAD_RING(vq, "id %u out of range\n", id);
-- 
2.52.0

