This patch moves the DMA address error check into vring_map_one_sg().

The benefits of doing this:

1. Saves one check of vq->use_dma_api: vring_mapping_error() no longer
   has to test it again for every mapped sg entry.
2. Makes using vring_map_one_sg() simpler: callers check the returned
   error code directly instead of calling vring_mapping_error() on the
   returned address, which simplifies the subsequent code.

Signed-off-by: Xuan Zhuo <xuanz...@linux.alibaba.com>
Acked-by: Jason Wang <jasow...@redhat.com>
---
 drivers/virtio/virtio_ring.c | 37 +++++++++++++++++++++---------------
 1 file changed, 22 insertions(+), 15 deletions(-)
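
Not part of the patch, just an aside for reviewers: the userspace sketch
below mimics the calling-convention change. The names (fake_map_old,
fake_map_new, fake_mapping_error, fake_addr_t) are made up for the
example; only the shape of the old vs. new error handling matches the
diff.

#include <stdio.h>

typedef unsigned long fake_addr_t;
#define FAKE_MAPPING_ERROR ((fake_addr_t)-1)

/* Old style: return the address; the caller must check it with a
 * separate helper (the role vring_mapping_error() used to play). */
static fake_addr_t fake_map_old(int fail)
{
        return fail ? FAKE_MAPPING_ERROR : 0x1000;
}

static int fake_mapping_error(fake_addr_t addr)
{
        return addr == FAKE_MAPPING_ERROR;
}

/* New style, mirroring the patch: return 0 or an error code and hand
 * the address back through an out parameter. */
static int fake_map_new(int fail, fake_addr_t *addr)
{
        if (fail)
                return -1;      /* the patch returns -ENOMEM here */
        *addr = 0x1000;
        return 0;
}

int main(void)
{
        fake_addr_t addr;

        /* Old calling pattern: map, then check with the helper. */
        addr = fake_map_old(0);
        if (fake_mapping_error(addr))
                return 1;

        /* New calling pattern: a single check, as the callers in
         * virtqueue_add_split()/_packed() now do. */
        if (fake_map_new(0, &addr))
                return 1;

        printf("mapped at 0x%lx\n", addr);
        return 0;
}

The point is that the sentinel-plus-helper check collapses into a single
"if (...)" at each call site, which is what the hunks below do for
virtqueue_add_split(), virtqueue_add_indirect_packed() and
virtqueue_add_packed().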

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index f8754f1d64d3..87d7ceeecdbd 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -355,9 +355,8 @@ static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
 }
 
 /* Map one sg entry. */
-static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
-                                  struct scatterlist *sg,
-                                  enum dma_data_direction direction)
+static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
+                           enum dma_data_direction direction, dma_addr_t *addr)
 {
        if (!vq->use_dma_api) {
                /*
@@ -366,7 +365,8 @@ static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
                 * depending on the direction.
                 */
                kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
-               return (dma_addr_t)sg_phys(sg);
+               *addr = (dma_addr_t)sg_phys(sg);
+               return 0;
        }
 
        /*
@@ -374,9 +374,14 @@ static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
         * the way it expects (we don't guarantee that the scatterlist
         * will exist for the lifetime of the mapping).
         */
-       return dma_map_page(vring_dma_dev(vq),
+       *addr = dma_map_page(vring_dma_dev(vq),
                            sg_page(sg), sg->offset, sg->length,
                            direction);
+
+       if (dma_mapping_error(vring_dma_dev(vq), *addr))
+               return -ENOMEM;
+
+       return 0;
 }
 
 static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
@@ -588,8 +593,9 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 
        for (n = 0; n < out_sgs; n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-                       dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
-                       if (vring_mapping_error(vq, addr))
+                       dma_addr_t addr;
+
+                       if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr))
                                goto unmap_release;
 
                        prev = i;
@@ -603,8 +609,9 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
        }
        for (; n < (out_sgs + in_sgs); n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-                       dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
-                       if (vring_mapping_error(vq, addr))
+                       dma_addr_t addr;
+
+                       if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr))
                                goto unmap_release;
 
                        prev = i;
@@ -1281,9 +1288,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
 
        for (n = 0; n < out_sgs + in_sgs; n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-                       addr = vring_map_one_sg(vq, sg, n < out_sgs ?
-                                       DMA_TO_DEVICE : DMA_FROM_DEVICE);
-                       if (vring_mapping_error(vq, addr))
+                       if (vring_map_one_sg(vq, sg, n < out_sgs ?
+                                            DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))
                                goto unmap_release;
 
                        desc[i].flags = cpu_to_le16(n < out_sgs ?
@@ -1428,9 +1434,10 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
        c = 0;
        for (n = 0; n < out_sgs + in_sgs; n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-                       dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
-                                       DMA_TO_DEVICE : DMA_FROM_DEVICE);
-                       if (vring_mapping_error(vq, addr))
+                       dma_addr_t addr;
+
+                       if (vring_map_one_sg(vq, sg, n < out_sgs ?
+                                            DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))
                                goto unmap_release;
 
                        flags = cpu_to_le16(vq->packed.avail_used_flags |
-- 
2.32.0.3.g01195cf9f
