On 08/31/2018 05:26 PM, Jesper Dangaard Brouer wrote:
> The compiler does an efficient job of inlining static C functions.
> Perf top clearly shows that almost everything gets inlined into the
> function call xdp_do_redirect.
> 
> The function xdp_do_redirect ends up containing and interleaving the
> map and non-map redirect code.  This is sub-optimal, as it would be
> strange for an XDP program to use both types of redirect in the same
> program. The two use-cases are separate, and interleaving the code
> just causes more instruction-cache pressure.
> 
> I would like to stress (again) that the non-map variant bpf_redirect
> is very slow compared to the bpf_redirect_map variant, approx half the
> speed.  Measured with driver i40e the difference is:
> 
> - map     redirect: 13,250,350 pps
> - non-map redirect:  7,491,425 pps
> 
> For this reason, the non-map variant of redirect has been named
> xdp_do_redirect_slow.  This hopefully gives a hint, when using perf,
> that this is not the optimal XDP redirect operating mode.
> 
> Signed-off-by: Jesper Dangaard Brouer <bro...@redhat.com>
> ---
>  net/core/filter.c |   52 ++++++++++++++++++++++++++++++----------------------
>  1 file changed, 30 insertions(+), 22 deletions(-)
> 
> diff --git a/net/core/filter.c b/net/core/filter.c
> index ec1b4eb0d3d4..c4ad1b93167f 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -3170,6 +3170,32 @@ static int __bpf_tx_xdp(struct net_device *dev,
>       return 0;
>  }
>  
> +/* non-static to avoid inline by compiler */
> +int xdp_do_redirect_slow(struct net_device *dev, struct xdp_buff *xdp,

Nit: should be 'static noinline' in that case then.

> +                     struct bpf_prog *xdp_prog, struct bpf_redirect_info *ri)
> +{
> +     struct net_device *fwd;
> +     u32 index = ri->ifindex;
> +     int err;
> +
> +     fwd = dev_get_by_index_rcu(dev_net(dev), index);
> +     ri->ifindex = 0;
> +     if (unlikely(!fwd)) {
> +             err = -EINVAL;
> +             goto err;
> +     }
> +
> +     err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
> +     if (unlikely(err))
> +             goto err;
> +
> +     _trace_xdp_redirect(dev, xdp_prog, index);
> +     return 0;
> +err:
> +     _trace_xdp_redirect_err(dev, xdp_prog, index, err);
> +     return err;
> +}
> +
>  static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
>                           struct bpf_map *map,
>                           struct xdp_buff *xdp,
> @@ -3264,9 +3290,9 @@ void bpf_clear_redirect_map(struct bpf_map *map)
>  }
>  
>  static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
> -                            struct bpf_prog *xdp_prog, struct bpf_map *map)
> +                            struct bpf_prog *xdp_prog, struct bpf_map *map,
> +                            struct bpf_redirect_info *ri)
>  {
> -     struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
>       u32 index = ri->ifindex;
>       void *fwd = NULL;
>       int err;
> @@ -3299,29 +3325,11 @@ int xdp_do_redirect(struct net_device *dev, struct 
> xdp_buff *xdp,
>  {
>       struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
>       struct bpf_map *map = READ_ONCE(ri->map);
> -     struct net_device *fwd;
> -     u32 index = ri->ifindex;
> -     int err;
>  
>       if (likely(map))
> -             return xdp_do_redirect_map(dev, xdp, xdp_prog, map);
> +             return xdp_do_redirect_map(dev, xdp, xdp_prog, map, ri);
>  
> -     fwd = dev_get_by_index_rcu(dev_net(dev), index);
> -     ri->ifindex = 0;
> -     if (unlikely(!fwd)) {
> -             err = -EINVAL;
> -             goto err;
> -     }
> -
> -     err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
> -     if (unlikely(err))
> -             goto err;
> -
> -     _trace_xdp_redirect(dev, xdp_prog, index);
> -     return 0;
> -err:
> -     _trace_xdp_redirect_err(dev, xdp_prog, index, err);
> -     return err;
> +     return xdp_do_redirect_slow(dev, xdp, xdp_prog, ri);
>  }
>  EXPORT_SYMBOL_GPL(xdp_do_redirect);
>  
> 

Reply via email to