Hello,

here is a very small first step towards MP(i) friendly PF: the patch
adds a mutex around the fragment cache.

Unlike other parts of PF, the fragment cache is a self-contained
subsystem. In that sense we can easily guard its entry points
(pf_reassemble(), pf_reassemble6()) with a mutex. The cache is shared
by both protocols (AF_INET, AF_INET6), hence there is just one lock.
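
In condensed form the pattern is simply (the actual change is in the
diff below):

        PF_FRAG_LOCK();
        rv = pf_reassemble(&pd->m, pd->dir, reason);  /* or pf_reassemble6() */
        PF_FRAG_UNLOCK();
        if (rv != PF_PASS)
                return (PF_DROP);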

The locks (technically speaking, mutexes) for other PF subsystems will
follow as soon as the remove & destroy operations for PF data objects
get untangled. What essentially needs to be done is to split the remove
and destroy operations for PF objects into separate functions. This is
currently being worked on.
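
To give a rough idea of the direction (the names below are made up for
illustration only and are not part of this diff), the end result should
look roughly like:

        /* sketch only: unlink under the lock, destroy outside of it */
        mtx_enter(&pf_x_mtx);
        pf_x_remove(x);         /* remove object from lists/tables */
        mtx_leave(&pf_x_mtx);
        pf_x_destroy(x);        /* free memory, no lock held */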

As you can see, the mutex, when acquired, raises the interrupt level to
softnet. The same interrupt level is used by the ioctl() and purge
threads. IMO it should be fine, but I'd like to hear some
confirmation...
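
For reference, my understanding of the mutex semantics here is:

        struct mutex pf_frag_mtx = MUTEX_INITIALIZER(IPL_SOFTNET);

        mtx_enter(&pf_frag_mtx);  /* raises IPL to IPL_SOFTNET on this CPU */
        /* ... the fragment cache is only touched here ... */
        mtx_leave(&pf_frag_mtx);  /* restores the previous IPL */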


any OKs?

thanks and
regards
sasha

--------8<---------------8<---------------8<------------------8<--------
Index: pf_norm.c
===================================================================
RCS file: /cvs/src/sys/net/pf_norm.c,v
retrieving revision 1.182
diff -u -p -r1.182 pf_norm.c
--- pf_norm.c   10 Sep 2015 08:28:31 -0000      1.182
+++ pf_norm.c   12 Sep 2015 17:18:43 -0000
@@ -134,6 +134,7 @@ int                  pf_reassemble6(struct mbuf **, st
 struct pool             pf_frent_pl, pf_frag_pl;
 struct pool             pf_state_scrub_pl;
 int                     pf_nfrents;
+struct mutex            pf_frag_mtx = MUTEX_INITIALIZER(IPL_SOFTNET);
 
 void
 pf_normalize_init(void)
@@ -771,6 +772,7 @@ pf_normalize_ip(struct pf_pdesc *pd, u_s
        struct ip       *h = mtod(pd->m, struct ip *);
        u_int16_t        fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
        u_int16_t        mff = (ntohs(h->ip_off) & IP_MF);
+       int              rv;
 
        if (!fragoff && !mff)
                goto no_fragment;
@@ -792,8 +794,11 @@ pf_normalize_ip(struct pf_pdesc *pd, u_s
        if (!pf_status.reass)
                return (PF_PASS);       /* no reassembly */
 
+       PF_FRAG_LOCK();
        /* Returns PF_DROP or m is NULL or completely reassembled mbuf */
-       if (pf_reassemble(&pd->m, pd->dir, reason) != PF_PASS)
+       rv = pf_reassemble(&pd->m, pd->dir, reason);
+       PF_FRAG_UNLOCK();
+       if (rv != PF_PASS)
                return (PF_DROP);
        if (pd->m == NULL)
                return (PF_PASS);  /* packet has been reassembled, no error */
@@ -813,6 +818,7 @@ int
 pf_normalize_ip6(struct pf_pdesc *pd, u_short *reason)
 {
        struct ip6_frag          frag;
+       int                      rv;
 
        if (pd->fragoff == 0)
                goto no_fragment;
@@ -824,9 +830,12 @@ pf_normalize_ip6(struct pf_pdesc *pd, u_
        if (!pf_status.reass)
                return (PF_PASS);       /* no reassembly */
 
+       PF_FRAG_LOCK();
        /* Returns PF_DROP or m is NULL or completely reassembled mbuf */
-       if (pf_reassemble6(&pd->m, &frag, pd->fragoff + sizeof(frag),
-           pd->extoff, pd->dir, reason) != PF_PASS)
+       rv = pf_reassemble6(&pd->m, &frag, pd->fragoff + sizeof(frag),
+           pd->extoff, pd->dir, reason);
+       PF_FRAG_UNLOCK();
+       if (rv != PF_PASS)
                return (PF_DROP);
        if (pd->m == NULL)
                return (PF_PASS);  /* packet has been reassembled, no error */
Index: pfvar.h
===================================================================
RCS file: /cvs/src/sys/net/pfvar.h,v
retrieving revision 1.420
diff -u -p -r1.420 pfvar.h
--- pfvar.h     19 Aug 2015 21:22:41 -0000      1.420
+++ pfvar.h     12 Sep 2015 17:18:43 -0000
@@ -1907,7 +1907,10 @@ int                       pf_postprocess_addr(struct pf_sta
 
 void                    pf_cksum(struct pf_pdesc *, struct mbuf *);
 
-#endif /* _KERNEL */
+extern struct mutex pf_frag_mtx;
+#define        PF_FRAG_LOCK()          mtx_enter(&pf_frag_mtx)
+#define        PF_FRAG_UNLOCK()        mtx_leave(&pf_frag_mtx)
 
+#endif /* _KERNEL */
 
 #endif /* _NET_PFVAR_H_ */
