On Wed, May 08, 2024 at 02:37:17PM +0800, Xuan Zhuo wrote:
> Now, the premapped mode can be enabled unconditionally.
> 
> So we can remove the failover code for merge and small mode.
> 
> Signed-off-by: Xuan Zhuo <xuanz...@linux.alibaba.com>
> Acked-by: Jason Wang <jasow...@redhat.com>
> ---
>  drivers/net/virtio_net.c | 85 +++++++++++++++++-----------------------
>  1 file changed, 35 insertions(+), 50 deletions(-)
> 
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index a2452d35bb93..070a6ed0d812 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -344,9 +344,6 @@ struct receive_queue {
>  
>       /* Record the last dma info to free after new pages is allocated. */
>       struct virtnet_rq_dma *last_dma;
> -
> -     /* Do dma by self */
> -     bool do_dma;
>  };
>  
>  /* This structure can contain rss message with maximum settings for indirection table and keysize
> @@ -846,7 +843,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
>       void *buf;
>  
>       buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
> -     if (buf && rq->do_dma)
> +     if (buf)
>               virtnet_rq_unmap(rq, buf, *len);
>  
>       return buf;
> @@ -859,11 +856,6 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
>       u32 offset;
>       void *head;
>  
> -     if (!rq->do_dma) {
> -             sg_init_one(rq->sg, buf, len);
> -             return;
> -     }
> -
>       head = page_address(rq->alloc_frag.page);
>  
>       offset = buf - head;
> @@ -889,44 +881,42 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
>  
>       head = page_address(alloc_frag->page);
>  
> -     if (rq->do_dma) {
> -             dma = head;
> -
> -             /* new pages */
> -             if (!alloc_frag->offset) {
> -                     if (rq->last_dma) {
> -                             /* Now, the new page is allocated, the last dma
> -                              * will not be used. So the dma can be unmapped
> -                              * if the ref is 0.
> -                              */
> -                             virtnet_rq_unmap(rq, rq->last_dma, 0);
> -                             rq->last_dma = NULL;
> -                     }
> +     dma = head;
>  
> -                     dma->len = alloc_frag->size - sizeof(*dma);
> +     /* new pages */
> +     if (!alloc_frag->offset) {
> +             if (rq->last_dma) {
> +                     /* Now, the new page is allocated, the last dma
> +                      * will not be used. So the dma can be unmapped
> +                      * if the ref is 0.
> +                      */
> +                     virtnet_rq_unmap(rq, rq->last_dma, 0);
> +                     rq->last_dma = NULL;
> +             }
>  
> -                     addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
> -                                                           dma->len, DMA_FROM_DEVICE, 0);
> -                     if (virtqueue_dma_mapping_error(rq->vq, addr))
> -                             return NULL;
> +             dma->len = alloc_frag->size - sizeof(*dma);
>  
> -                     dma->addr = addr;
> -                     dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
> +             addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
> +                                                   dma->len, DMA_FROM_DEVICE, 0);
> +             if (virtqueue_dma_mapping_error(rq->vq, addr))
> +                     return NULL;
>  
> -                     /* Add a reference to dma to prevent the entire dma from
> -                      * being released during error handling. This reference
> -                      * will be freed after the pages are no longer used.
> -                      */
> -                     get_page(alloc_frag->page);
> -                     dma->ref = 1;
> -                     alloc_frag->offset = sizeof(*dma);
> +             dma->addr = addr;
> +             dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
>  
> -                     rq->last_dma = dma;
> -             }
> +             /* Add a reference to dma to prevent the entire dma from
> +              * being released during error handling. This reference
> +              * will be freed after the pages are no longer used.
> +              */
> +             get_page(alloc_frag->page);
> +             dma->ref = 1;
> +             alloc_frag->offset = sizeof(*dma);
>  
> -             ++dma->ref;
> +             rq->last_dma = dma;
>       }
>  
> +     ++dma->ref;
> +
>       buf = head + alloc_frag->offset;
>  
>       get_page(alloc_frag->page);
> @@ -943,12 +933,9 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
>       if (!vi->mergeable_rx_bufs && vi->big_packets)
>               return;
>  
> -     for (i = 0; i < vi->max_queue_pairs; i++) {
> -             if (virtqueue_set_dma_premapped(vi->rq[i].vq))
> -                     continue;
> -
> -             vi->rq[i].do_dma = true;
> -     }
> +     for (i = 0; i < vi->max_queue_pairs; i++)
> +             /* error never happen */

Nit: the comment would read better as "/* error should never happen */".

Other than that, the code looks fine to me.
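
Just as an aside, not a request for changes: if BUG_ON() ever turns out to be too heavy-handed here, the same "cannot fail" expectation could be expressed as a warning instead. A rough sketch only, assuming virtqueue_set_dma_premapped() keeps returning a non-zero value on failure, as the removed fallback path implies:

	for (i = 0; i < vi->max_queue_pairs; i++)
		/* premapped mode is expected to always succeed here */
		WARN_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));

Either way works for me, so keeping the BUG_ON() is fine.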
Reviewed-by: Larysa Zaremba <larysa.zare...@intel.com>

> +             BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
>  }
>  
>  static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
> @@ -2020,8 +2007,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
>  
>       err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
>       if (err < 0) {
> -             if (rq->do_dma)
> -                     virtnet_rq_unmap(rq, buf, 0);
> +             virtnet_rq_unmap(rq, buf, 0);
>               put_page(virt_to_head_page(buf));
>       }
>  
> @@ -2135,8 +2121,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
>       ctx = mergeable_len_to_ctx(len + room, headroom);
>       err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
>       if (err < 0) {
> -             if (rq->do_dma)
> -                     virtnet_rq_unmap(rq, buf, 0);
> +             virtnet_rq_unmap(rq, buf, 0);
>               put_page(virt_to_head_page(buf));
>       }
>  
> @@ -5206,7 +5191,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
>       int i;
>       for (i = 0; i < vi->max_queue_pairs; i++)
>               if (vi->rq[i].alloc_frag.page) {
> -                     if (vi->rq[i].do_dma && vi->rq[i].last_dma)
> +                     if (vi->rq[i].last_dma)
>                       virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
>                       put_page(vi->rq[i].alloc_frag.page);
>               }
> -- 
> 2.32.0.3.g01195cf9f
> 
> 
