Hi,

On Tue, 2023-03-28 at 20:04 +0800, Xuan Zhuo wrote:
> The purpose of this patch is to simplify the receive_mergeable().
> Separate all the logic of XDP into a function.
> 
> Signed-off-by: Xuan Zhuo <xuanz...@linux.alibaba.com>
> ---
>  drivers/net/virtio_net.c | 128 +++++++++++++++++++++++----------------
>  1 file changed, 76 insertions(+), 52 deletions(-)
> 
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 136131a7868a..c8978d8d8adb 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -1316,6 +1316,63 @@ static void *mergeable_xdp_prepare(struct virtnet_info 
> *vi,
>       return page_address(xdp_page) + VIRTIO_XDP_HEADROOM;
>  }
>  
> +static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
> +                                          struct virtnet_info *vi,
> +                                          struct receive_queue *rq,
> +                                          struct bpf_prog *xdp_prog,
> +                                          void *buf,
> +                                          void *ctx,
> +                                          unsigned int len,
> +                                          unsigned int *xdp_xmit,
> +                                          struct virtnet_rq_stats *stats)
> +{
> +     struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
> +     int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
> +     struct page *page = virt_to_head_page(buf);
> +     int offset = buf - page_address(page);
> +     unsigned int xdp_frags_truesz = 0;
> +     struct sk_buff *head_skb;
> +     unsigned int frame_sz;
> +     struct xdp_buff xdp;
> +     void *data;
> +     u32 act;
> +     int err;
> +
> +     data = mergeable_xdp_prepare(vi, rq, xdp_prog, ctx, &frame_sz, 
> &num_buf, &page,
> +                                  offset, &len, hdr);
> +     if (!data)
> +             goto err_xdp;
> +
> +     err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
> +                                      &num_buf, &xdp_frags_truesz, stats);
> +     if (unlikely(err))
> +             goto err_xdp;
> +
> +     act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> +
> +     switch (act) {
> +     case VIRTNET_XDP_RES_PASS:
> +             head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, 
> xdp_frags_truesz);
> +             if (unlikely(!head_skb))
> +                     goto err_xdp;
> +             return head_skb;
> +
> +     case VIRTNET_XDP_RES_CONSUMED:
> +             return NULL;
> +
> +     case VIRTNET_XDP_RES_DROP:
> +             break;
> +     }
> +
> +err_xdp:
> +     put_page(page);
> +     mergeable_buf_free(rq, num_buf, dev, stats);
> +
> +     stats->xdp_drops++;
> +     stats->drops++;
> +     return NULL;
> +}
> +
>  static struct sk_buff *receive_mergeable(struct net_device *dev,
>                                        struct virtnet_info *vi,
>                                        struct receive_queue *rq,
> @@ -1325,21 +1382,22 @@ static struct sk_buff *receive_mergeable(struct 
> net_device *dev,
>                                        unsigned int *xdp_xmit,
>                                        struct virtnet_rq_stats *stats)
>  {
> -     struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
> -     int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
> -     struct page *page = virt_to_head_page(buf);
> -     int offset = buf - page_address(page);
> -     struct sk_buff *head_skb, *curr_skb;
> -     struct bpf_prog *xdp_prog;
>       unsigned int truesize = mergeable_ctx_to_truesize(ctx);
>       unsigned int headroom = mergeable_ctx_to_headroom(ctx);
>       unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
>       unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
> -     unsigned int frame_sz;
> -     int err;
> +     struct virtio_net_hdr_mrg_rxbuf *hdr;
> +     struct sk_buff *head_skb, *curr_skb;
> +     struct bpf_prog *xdp_prog;
> +     struct page *page;
> +     int num_buf;
> +     int offset;
>  
>       head_skb = NULL;
>       stats->bytes += len - vi->hdr_len;
> +     hdr = buf;
> +     num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
> +     page = virt_to_head_page(buf);
>  
>       if (unlikely(len > truesize - room)) {
>               pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
> @@ -1348,51 +1406,21 @@ static struct sk_buff *receive_mergeable(struct 
> net_device *dev,
>               goto err_skb;
>       }
>  
> -     if (likely(!vi->xdp_enabled)) {
> -             xdp_prog = NULL;
> -             goto skip_xdp;
> -     }
> -
> -     rcu_read_lock();
> -     xdp_prog = rcu_dereference(rq->xdp_prog);
> -     if (xdp_prog) {
> -             unsigned int xdp_frags_truesz = 0;
> -             struct xdp_buff xdp;
> -             void *data;
> -             u32 act;
> -
> -             data = mergeable_xdp_prepare(vi, rq, xdp_prog, ctx, &frame_sz, 
> &num_buf, &page,
> -                                          offset, &len, hdr);
> -             if (!data)
> -                     goto err_xdp;
> -
> -             err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, 
> frame_sz,
> -                                              &num_buf, &xdp_frags_truesz, 
> stats);
> -             if (unlikely(err))
> -                     goto err_xdp;
> -
> -             act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> -
> -             switch (act) {
> -             case VIRTNET_XDP_RES_PASS:
> -                     head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, 
> xdp_frags_truesz);
> -                     if (unlikely(!head_skb))
> -                             goto err_xdp;
> -
> +     if (likely(vi->xdp_enabled)) {

This changes the branch prediction hint compared to the existing code,
since the current code has:
        if (likely(!vi->xdp_enabled)) {


and I think it would be better to avoid such a change.

Thanks,

Paolo

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Reply via email to