On 03/14/2018 05:16 PM, Eric Dumazet wrote:

> 
> typical use after free...
> 
> diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
> index 
> 190570f21b208d5a17943360a3a6f85e1c2a2187..663e016491773f40f81d9bbfeab3dd68e1c2fc5c
>  100644
> --- a/net/sched/sch_generic.c
> +++ b/net/sched/sch_generic.c
> @@ -628,6 +628,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct 
> Qdisc *qdisc,
>         int band = prio2band[skb->priority & TC_PRIO_MAX];
>         struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
>         struct skb_array *q = band2list(priv, band);
> +       unsigned int pkt_len = qdisc_pkt_len(skb);
>         int err;
>  
>         err = skb_array_produce(q, skb);
> @@ -636,7 +637,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct 
> Qdisc *qdisc,
>                 return qdisc_drop_cpu(skb, qdisc, to_free);
>  
>         qdisc_qstats_cpu_qlen_inc(qdisc);
> -       qdisc_qstats_cpu_backlog_inc(qdisc, skb);
> +       this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len);
>         return NET_XMIT_SUCCESS;
>  }
> 
There is also a similar issue right after the qdisc_enqueue_skb_bad_txq() call.

We should move the following code into qdisc_enqueue_skb_bad_txq() to benefit
from the locking there:

                       if (qdisc_is_percpu_stats(q)) {
                               qdisc_qstats_cpu_backlog_inc(q, nskb);
                               qdisc_qstats_cpu_qlen_inc(q);
                       } else {
                               qdisc_qstats_backlog_inc(q, nskb);
                               q->q.qlen++;
                       }


I will post a patch with the two fixes.

Reply via email to