On 28/03/18(Wed) 11:28, David Gwynne wrote:
> On Thu, Mar 15, 2018 at 03:25:46PM +0100, Martin Pieuchot wrote:
> > On 14/03/18(Wed) 13:00, David Gwynne wrote:
> > > this adds transmit mitigation back to the tree.
> > > 
> > > it is basically the same diff as last time. the big difference this
> > > time is that all the tunnel drivers all defer ip_output calls, which
> > > avoids having to play games with NET_LOCK in the ifq transmit paths.
> > 
> > Comments inline.
> > 
> > > + if (ifq_len(ifq) >= min(4, ifq->ifq_maxlen)) {
> > 
> > Why 4?  DragonFly recently bumped `ifsq_stage_cntmax' to 16.  Did you
> > try other values?  They also have an XXX comment that this value should
> > be per-interface.  Why?
> 
> their default was 4, and they'd done some research on it. if they
> moved to 16 there would be a reason for it.
> 
> > In any case I'd be happier with a define.
> 
> i've taken your advice on board and made it per interface, defaulted
> to 16 with a macro. after this i can add ioctls to report it (under
> hwfeatures maybe) and change it with ifconfig.
> 
> > >  ifq_barrier(struct ifqueue *ifq)
> > >  {
> > >   struct cond c = COND_INITIALIZER();
> > >   struct task t = TASK_INITIALIZER(ifq_barrier_task, &c);
> > >  
> > > + NET_ASSERT_UNLOCKED();
> > 
> > Since you assert here...
> > 
> > > +
> > > + if (!task_del(ifq->ifq_softnet, &ifq->ifq_bundle)) {
> > > +         int netlocked = (rw_status(&netlock) == RW_WRITE);
> >                 ^^^^^^^^^
> > You can remove this code.
> 
> i can't get rid of the assert :(
> 
> ifq_barrier is called from lots of places, eg, ifq_destroy and
> ix_down. the former does not hold the net lock, but the latter does.
> putting manual NET_UNLOCK calls around places like the latter is
> too much work imo.

You're right, getting rid of the NET_LOCK() is too much work.  That's
why I'm being blunt about it: I don't want such a pattern to spread.  I'd
prefer an assert and moving the dance into the only place that needs it.

Anyway `if_sndmit' should be [d] since it's a driver variable, not a
stack one.

Apart from that ok mpi@

> retrieving revision 1.548
> diff -u -p -r1.548 if.c
> --- if.c      2 Mar 2018 15:52:11 -0000       1.548
> +++ if.c      28 Mar 2018 01:28:03 -0000
> @@ -607,6 +607,7 @@ if_attach_common(struct ifnet *ifp)
>       ifp->if_snd.ifq_ifqs[0] = &ifp->if_snd;
>       ifp->if_ifqs = ifp->if_snd.ifq_ifqs;
>       ifp->if_nifqs = 1;
> +     ifp->if_sndmit = IF_SNDMIT_DEFAULT;
>  
>       ifiq_init(&ifp->if_rcv, ifp, 0);
>  
> Index: if_var.h
> ===================================================================
> RCS file: /cvs/src/sys/net/if_var.h,v
> retrieving revision 1.89
> diff -u -p -r1.89 if_var.h
> --- if_var.h  10 Jan 2018 23:50:39 -0000      1.89
> +++ if_var.h  28 Mar 2018 01:28:03 -0000
> @@ -170,6 +170,7 @@ struct ifnet {                            /* and the 
> entries */
>       struct  ifqueue **if_ifqs;      /* [I] pointer to an array of sndqs */
>       void    (*if_qstart)(struct ifqueue *);
>       unsigned int if_nifqs;          /* [I] number of output queues */
> +     unsigned int if_sndmit;         /* [c] tx mitigation amount */
>  
>       struct  ifiqueue if_rcv;        /* rx/input queue */
>       struct  ifiqueue **if_iqs;      /* [I] pointer to the array of iqs */
> @@ -279,6 +280,9 @@ do {                                                      
>                 \
>  #define      IFQ_LEN(ifq)                    ifq_len(ifq)
>  #define      IFQ_IS_EMPTY(ifq)               ifq_empty(ifq)
>  #define      IFQ_SET_MAXLEN(ifq, len)        ifq_set_maxlen(ifq, len)
> +
> +#define IF_SNDMIT_MIN                        1
> +#define IF_SNDMIT_DEFAULT            16
>  
>  /* default interface priorities */
>  #define IF_WIRED_DEFAULT_PRIORITY    0
> Index: ifq.c
> ===================================================================
> RCS file: /cvs/src/sys/net/ifq.c,v
> retrieving revision 1.22
> diff -u -p -r1.22 ifq.c
> --- ifq.c     25 Jan 2018 14:04:36 -0000      1.22
> +++ ifq.c     28 Mar 2018 01:28:03 -0000
> @@ -70,9 +70,16 @@ struct priq {
>  void ifq_start_task(void *);
>  void ifq_restart_task(void *);
>  void ifq_barrier_task(void *);
> +void ifq_bundle_task(void *);
>  
>  #define TASK_ONQUEUE 0x1
>  
> +static inline void
> +ifq_run_start(struct ifqueue *ifq)
> +{
> +     ifq_serialize(ifq, &ifq->ifq_start);
> +}
> +
>  void
>  ifq_serialize(struct ifqueue *ifq, struct task *t)
>  {
> @@ -114,6 +121,16 @@ ifq_is_serialized(struct ifqueue *ifq)
>  }
>  
>  void
> +ifq_start(struct ifqueue *ifq)
> +{
> +     if (ifq_len(ifq) >= min(ifq->ifq_if->if_sndmit, ifq->ifq_maxlen)) {
> +             task_del(ifq->ifq_softnet, &ifq->ifq_bundle);
> +             ifq_run_start(ifq);
> +     } else
> +             task_add(ifq->ifq_softnet, &ifq->ifq_bundle);
> +}
> +
> +void
>  ifq_start_task(void *p)
>  {
>       struct ifqueue *ifq = p;
> @@ -137,11 +154,31 @@ ifq_restart_task(void *p)
>  }
>  
>  void
> +ifq_bundle_task(void *p)
> +{
> +     struct ifqueue *ifq = p;
> +
> +     ifq_run_start(ifq);
> +}
> +
> +void
>  ifq_barrier(struct ifqueue *ifq)
>  {
>       struct cond c = COND_INITIALIZER();
>       struct task t = TASK_INITIALIZER(ifq_barrier_task, &c);
>  
> +     if (!task_del(ifq->ifq_softnet, &ifq->ifq_bundle)) {
> +             int netlocked = (rw_status(&netlock) == RW_WRITE);
> +
> +             if (netlocked) /* XXXSMP breaks atomicity */
> +                     NET_UNLOCK();
> +
> +             taskq_barrier(ifq->ifq_softnet);
> +
> +             if (netlocked)
> +                     NET_LOCK();
> +     }
> +
>       if (ifq->ifq_serializer == NULL)
>               return;
>  
> @@ -166,6 +203,7 @@ void
>  ifq_init(struct ifqueue *ifq, struct ifnet *ifp, unsigned int idx)
>  {
>       ifq->ifq_if = ifp;
> +     ifq->ifq_softnet = net_tq(ifp->if_index);
>       ifq->ifq_softc = NULL;
>  
>       mtx_init(&ifq->ifq_mtx, IPL_NET);
> @@ -187,6 +225,7 @@ ifq_init(struct ifqueue *ifq, struct ifn
>       mtx_init(&ifq->ifq_task_mtx, IPL_NET);
>       TAILQ_INIT(&ifq->ifq_task_list);
>       ifq->ifq_serializer = NULL;
> +     task_set(&ifq->ifq_bundle, ifq_bundle_task, ifq);
>  
>       task_set(&ifq->ifq_start, ifq_start_task, ifq);
>       task_set(&ifq->ifq_restart, ifq_restart_task, ifq);
> @@ -237,6 +276,8 @@ void
>  ifq_destroy(struct ifqueue *ifq)
>  {
>       struct mbuf_list ml = MBUF_LIST_INITIALIZER();
> +
> +     ifq_barrier(ifq); /* ensure nothing is running with the ifq */
>  
>       /* don't need to lock because this is the last use of the ifq */
>  
> Index: ifq.h
> ===================================================================
> RCS file: /cvs/src/sys/net/ifq.h,v
> retrieving revision 1.20
> diff -u -p -r1.20 ifq.h
> --- ifq.h     4 Jan 2018 11:02:57 -0000       1.20
> +++ ifq.h     28 Mar 2018 01:28:03 -0000
> @@ -25,6 +25,7 @@ struct ifq_ops;
>  
>  struct ifqueue {
>       struct ifnet            *ifq_if;
> +     struct taskq            *ifq_softnet;
>       union {
>               void                    *_ifq_softc;
>               /*
> @@ -57,6 +58,7 @@ struct ifqueue {
>       struct mutex             ifq_task_mtx;
>       struct task_list         ifq_task_list;
>       void                    *ifq_serializer;
> +     struct task              ifq_bundle;
>  
>       /* work to be serialised */
>       struct task              ifq_start;
> @@ -405,6 +407,7 @@ void               ifq_attach(struct ifqueue *, cons
>  void          ifq_destroy(struct ifqueue *);
>  void          ifq_add_data(struct ifqueue *, struct if_data *);
>  int           ifq_enqueue(struct ifqueue *, struct mbuf *);
> +void          ifq_start(struct ifqueue *);
>  struct mbuf  *ifq_deq_begin(struct ifqueue *);
>  void          ifq_deq_commit(struct ifqueue *, struct mbuf *);
>  void          ifq_deq_rollback(struct ifqueue *, struct mbuf *);
> @@ -438,12 +441,6 @@ static inline unsigned int
>  ifq_is_oactive(struct ifqueue *ifq)
>  {
>       return (ifq->ifq_oactive);
> -}
> -
> -static inline void
> -ifq_start(struct ifqueue *ifq)
> -{
> -     ifq_serialize(ifq, &ifq->ifq_start);
>  }
>  
>  static inline void
> 

Reply via email to