On Thu, May 25, 2023 at 03:20:04AM +0000, Klemens Nanni wrote:
> pfsync_in_bus() looks like the only place where the static array
> pf_pool_limits[] is accessed without the pf lock, so grab it there.
> 
> Limits themselves are protected by the pf lock and pool(9)s are never
> destroyed and have built-in per-pool locks, so the net lock is not
> needed.
> 
> (pf_pool_limits[] access in DIOCXCOMMIT remains under pf *and net* lock
>  until the rest in there gets pulled out of the net lock.)
> 
> Feedback? OK?

Here is the corrected diff, with the typo fixed and the previously
missing locking comment added.
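
For reviewers skimming the diff: the pattern it settles on is pf lock
only around the shared pf_pool_limits[] table, with pool(9) internals
left to each pool's built-in mutex. A minimal sketch of that pattern
(the read_state_limit() helper is hypothetical, for illustration only,
not code from the tree):

/*
 * Sketch: pf_pool_limits[] is serialized by the pf lock alone;
 * pool(9) internals are covered by the per-pool mutex, so no
 * net lock is taken around reads of the limits table.
 */
unsigned int
read_state_limit(void)
{
	unsigned int limit;

	PF_LOCK();
	limit = pf_pool_limits[PF_LIMIT_STATES].limit;
	PF_UNLOCK();

	return (limit);
}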


Index: pf_ioctl.c
===================================================================
RCS file: /cvs/src/sys/net/pf_ioctl.c,v
retrieving revision 1.404
diff -u -p -r1.404 pf_ioctl.c
--- pf_ioctl.c  11 May 2023 12:36:22 -0000      1.404
+++ pf_ioctl.c  24 May 2023 13:48:52 -0000
@@ -2094,44 +2094,37 @@ pfioctl(dev_t dev, u_long cmd, caddr_t a
                        error = EINVAL;
                        goto fail;
                }
-               NET_LOCK();
                PF_LOCK();
                pl->limit = pf_pool_limits[pl->index].limit;
                PF_UNLOCK();
-               NET_UNLOCK();
                break;
        }
 
        case DIOCSETLIMIT: {
                struct pfioc_limit      *pl = (struct pfioc_limit *)addr;
 
-               NET_LOCK();
                PF_LOCK();
                if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
                        error = EINVAL;
                        PF_UNLOCK();
-                       NET_UNLOCK();
                        goto fail;
                }
                if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout >
                    pl->limit) {
                        error = EBUSY;
                        PF_UNLOCK();
-                       NET_UNLOCK();
                        goto fail;
                }
                /* Fragments reference mbuf clusters. */
                if (pl->index == PF_LIMIT_FRAGS && pl->limit > nmbclust) {
                        error = EINVAL;
                        PF_UNLOCK();
-                       NET_UNLOCK();
                        goto fail;
                }
 
                pf_pool_limits[pl->index].limit_new = pl->limit;
                pl->limit = pf_pool_limits[pl->index].limit;
                PF_UNLOCK();
-               NET_UNLOCK();
                break;
        }
 
Index: if_pfsync.c
===================================================================
RCS file: /cvs/src/sys/net/if_pfsync.c,v
retrieving revision 1.315
diff -u -p -r1.315 if_pfsync.c
--- if_pfsync.c 18 May 2023 12:10:04 -0000      1.315
+++ if_pfsync.c 25 May 2023 03:27:29 -0000
@@ -1009,10 +1009,12 @@ pfsync_in_bus(caddr_t buf, int len, int 
 
        switch (bus->status) {
        case PFSYNC_BUS_START:
+               PF_LOCK();
                timeout_add(&sc->sc_bulkfail_tmo, 4 * hz +
                    pf_pool_limits[PF_LIMIT_STATES].limit /
                    ((sc->sc_if.if_mtu - PFSYNC_MINPKT) /
                    sizeof(struct pfsync_state)));
+               PF_UNLOCK();
                DPFPRINTF(LOG_INFO, "received bulk update start");
                break;
 
@@ -2037,10 +2039,12 @@ pfsync_request_full_update(struct pfsync
 #endif
                pfsync_sync_ok = 0;
                DPFPRINTF(LOG_INFO, "requesting bulk update");
+               PF_LOCK();
                timeout_add(&sc->sc_bulkfail_tmo, 4 * hz +
                    pf_pool_limits[PF_LIMIT_STATES].limit /
                    ((sc->sc_if.if_mtu - PFSYNC_MINPKT) /
                    sizeof(struct pfsync_state)));
+               PF_UNLOCK();
                pfsync_request_update(0, 0);
        }
 }
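
For what it's worth, the timeout being guarded in both hunks scales
with the number of pfsync packets a full bulk update is expected to
take: four seconds of grace plus one tick per packet, where a packet
carries (if_mtu - PFSYNC_MINPKT) / sizeof(struct pfsync_state) states.
A stand-alone sketch of the same arithmetic (function name and
parameters are mine; in the real code the state limit is read from
pf_pool_limits[] under PF_LOCK(), as above):

/*
 * Ticks before declaring a bulk update failed: 4s of grace plus
 * one tick per pfsync packet expected for a full state transfer.
 */
int
bulkfail_ticks(unsigned int state_limit, unsigned int mtu,
    unsigned int minpkt, size_t state_sz, int hz_val)
{
	/* states that fit into one pfsync packet */
	unsigned int states_per_pkt = (mtu - minpkt) / state_sz;

	return (4 * hz_val + state_limit / states_per_pkt);
}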
