Re: [PATCH -next 3/4] sched: remove qdisc_rehape_fail
On Wed, 2016-06-08 at 23:01 +0200, Florian Westphal wrote: > > Would you mind an annotation rather than covering the hole? > > --- a/include/net/sch_generic.h > +++ b/include/net/sch_generic.h > @@ -71,11 +71,11 @@ struct Qdisc { > struct gnet_stats_basic_cpu __percpu *cpu_bstats; > struct gnet_stats_queue __percpu *cpu_qstats; > > - struct Qdisc *next_sched; > - struct sk_buff *gso_skb; > /* > * For performance sake on SMP, we put highly modified fields at the > end > */ > + struct Qdisc *next_sched cacheline_aligned_in_smp; > + struct sk_buff *gso_skb; > > > ... it creates 16 byte hole after cpu_qstats and keeps the rest as-is > (i.e. next_sched is at beginning of 2nd cacheline, as before the removal). > > I could also cover the hole by moving rcu_head there but it seems fragile > and doesn't reduce total struct size anyway (we get larger hole at end). > > If you have no objection I'd resubmit the series as-is but with this patch. > > Let me know, thanks Eric! This all looks fine to me.
Re: [PATCH -next 3/4] sched: remove qdisc_rehape_fail
Eric Dumazet wrote: > On Wed, 2016-06-08 at 17:35 +0200, Florian Westphal wrote: > > After the removal of TCA_CBQ_POLICE in cbq scheduler qdisc->reshape_fail > > is always NULL, i.e. qdisc_rehape_fail is now the same as qdisc_drop. > > > > Signed-off-by: Florian Westphal > > --- > > include/net/sch_generic.h | 19 --- > > net/sched/sch_fifo.c | 4 ++-- > > net/sched/sch_netem.c | 4 ++-- > > net/sched/sch_plug.c | 2 +- > > net/sched/sch_tbf.c | 4 ++-- > > 5 files changed, 7 insertions(+), 26 deletions(-) > > > > diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h > > index c069ac1..a9aec63 100644 > > --- a/include/net/sch_generic.h > > +++ b/include/net/sch_generic.h > > @@ -63,9 +63,6 @@ struct Qdisc { > > struct list_head list; > > u32 handle; > > u32 parent; > > - int (*reshape_fail)(struct sk_buff *skb, > > - struct Qdisc *q); > > - > > void *u32_node; > > > > You removed 2 pointers from Qdisc, so now next_sched & gso_skb are in a > different cache line than ->state > > Some performance penalty is expected, unless you move a read_mostly > field there to compensate. Would you mind an annotation rather than covering the hole? --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -71,11 +71,11 @@ struct Qdisc { struct gnet_stats_basic_cpu __percpu *cpu_bstats; struct gnet_stats_queue __percpu *cpu_qstats; - struct Qdisc *next_sched; - struct sk_buff *gso_skb; /* * For performance sake on SMP, we put highly modified fields at the end */ + struct Qdisc *next_sched cacheline_aligned_in_smp; + struct sk_buff *gso_skb; ... it creates 16 byte hole after cpu_qstats and keeps the rest as-is (i.e. next_sched is at beginning of 2nd cacheline, as before the removal). I could also cover the hole by moving rcu_head there but it seems fragile and doesn't reduce total struct size anyway (we get larger hole at end). If you have no objection I'd resubmit the series as-is but with this patch. Let me know, thanks Eric!
Re: [PATCH -next 3/4] sched: remove qdisc_rehape_fail
On Wed, 2016-06-08 at 17:35 +0200, Florian Westphal wrote: > After the removal of TCA_CBQ_POLICE in cbq scheduler qdisc->reshape_fail > is always NULL, i.e. qdisc_rehape_fail is now the same as qdisc_drop. > > Signed-off-by: Florian Westphal > --- > include/net/sch_generic.h | 19 --- > net/sched/sch_fifo.c | 4 ++-- > net/sched/sch_netem.c | 4 ++-- > net/sched/sch_plug.c | 2 +- > net/sched/sch_tbf.c | 4 ++-- > 5 files changed, 7 insertions(+), 26 deletions(-) > > diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h > index c069ac1..a9aec63 100644 > --- a/include/net/sch_generic.h > +++ b/include/net/sch_generic.h > @@ -63,9 +63,6 @@ struct Qdisc { > struct list_head list; > u32 handle; > u32 parent; > - int (*reshape_fail)(struct sk_buff *skb, > - struct Qdisc *q); > - > void *u32_node; > You removed 2 pointers from Qdisc, so now next_sched & gso_skb are in a different cache line than ->state Some performance penalty is expected, unless you move a read_mostly field there to compensate.
[PATCH -next 3/4] sched: remove qdisc_rehape_fail
After the removal of TCA_CBQ_POLICE in cbq scheduler qdisc->reshape_fail is always NULL, i.e. qdisc_rehape_fail is now the same as qdisc_drop. Signed-off-by: Florian Westphal --- include/net/sch_generic.h | 19 --- net/sched/sch_fifo.c | 4 ++-- net/sched/sch_netem.c | 4 ++-- net/sched/sch_plug.c | 2 +- net/sched/sch_tbf.c | 4 ++-- 5 files changed, 7 insertions(+), 26 deletions(-) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index c069ac1..a9aec63 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -63,9 +63,6 @@ struct Qdisc { struct list_head list; u32 handle; u32 parent; - int (*reshape_fail)(struct sk_buff *skb, - struct Qdisc *q); - void *u32_node; struct netdev_queue *dev_queue; @@ -771,22 +768,6 @@ static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch) return NET_XMIT_DROP; } -static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch) -{ - qdisc_qstats_drop(sch); - -#ifdef CONFIG_NET_CLS_ACT - if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch)) - goto drop; - - return NET_XMIT_SUCCESS; - -drop: -#endif - kfree_skb(skb); - return NET_XMIT_DROP; -} - /* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how long it will take to send a packet given its size. 
*/ diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index 2177eac..7857f74 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c @@ -24,7 +24,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit)) return qdisc_enqueue_tail(skb, sch); - return qdisc_reshape_fail(skb, sch); + return qdisc_drop(skb, sch); } static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) @@ -32,7 +32,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (likely(skb_queue_len(&sch->q) < sch->limit)) return qdisc_enqueue_tail(skb, sch); - return qdisc_reshape_fail(skb, sch); + return qdisc_drop(skb, sch); } static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch) diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 205bed0..31984c7 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -407,7 +407,7 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch) segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); if (IS_ERR_OR_NULL(segs)) { - qdisc_reshape_fail(skb, sch); + qdisc_drop(skb, sch); return NULL; } consume_skb(skb); @@ -499,7 +499,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) } if (unlikely(skb_queue_len(&sch->q) >= sch->limit)) - return qdisc_reshape_fail(skb, sch); + return qdisc_drop(skb, sch); qdisc_qstats_backlog_inc(sch, skb); diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c index 5abfe44..ff0d968 100644 --- a/net/sched/sch_plug.c +++ b/net/sched/sch_plug.c @@ -96,7 +96,7 @@ static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch) return qdisc_enqueue_tail(skb, sch); } - return qdisc_reshape_fail(skb, sch); + return qdisc_drop(skb, sch); } static struct sk_buff *plug_dequeue(struct Qdisc *sch) diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 83b90b5..801148c 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -166,7 +166,7 @@ static int 
tbf_segment(struct sk_buff *skb, struct Qdisc *sch) segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); if (IS_ERR_OR_NULL(segs)) - return qdisc_reshape_fail(skb, sch); + return qdisc_drop(skb, sch); nb = 0; while (segs) { @@ -198,7 +198,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (qdisc_pkt_len(skb) > q->max_size) { if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size) return tbf_segment(skb, sch); - return qdisc_reshape_fail(skb, sch); + return qdisc_drop(skb, sch); } ret = qdisc_enqueue(skb, q->qdisc); if (ret != NET_XMIT_SUCCESS) { -- 2.7.3