Author: glebius
Date: Thu Jan 22 01:23:16 2015
New Revision: 277519
URL: https://svnweb.freebsd.org/changeset/base/277519

Log:
  Back out r276841, r276756, r276747, r276746. The change in r276747 is
  quite questionable on its own, since it makes vimages more dependent on
  each other. But the actual reason for the backout is that it broke the
  shutdown of the pf purge threads, so the kernel now panics immediately on
  pf module unload. Although module unloading isn't an advertised feature of
  pf, it is very important for the development process.
  
  I would prefer not to back out r276746, since in general it is a good
  change. But because it introduced numerous build breakages, which were
  later addressed in r276841, r276756 and r276747, I need to back it out as
  well. It is better to replay it cleanly from scratch.

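For context, the shutdown handshake that this backout restores (visible in the
pf.c and pf_ioctl.c hunks below) is a small counter protocol: pf_unload() sets
V_pf_end_threads to 1 and sleeps until pf_purge_thread() finishes a final
cleanup pass, bumps the counter to 2, wakes the waiter and calls kproc_exit().
The following is a minimal userspace sketch of the same pattern, using pthreads
in place of the kernel's rw_sleep()/wakeup(); all names here are illustrative
and not from the tree:

/*
 * Minimal userspace analogue of the pf purge thread shutdown handshake.
 * end_threads mirrors V_pf_end_threads: 0 = keep running, 1 = stop
 * requested, 2 = worker has finished its final pass.
 * Build (illustrative): cc -o handshake handshake.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

static int end_threads;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;

static void *
purge_thread(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        while (end_threads == 0) {
                /* Periodic purge work would run here between waits. */
                pthread_cond_wait(&cv, &lock);
        }
        /* Final cleanup pass would run here; then announce and exit. */
        end_threads++;
        pthread_cond_broadcast(&cv);
        pthread_mutex_unlock(&lock);
        return (NULL);
}

int
main(void)
{
        pthread_t td;

        pthread_create(&td, NULL, purge_thread, NULL);

        /* Unload side: request shutdown, then wait for the acknowledgement. */
        pthread_mutex_lock(&lock);
        end_threads = 1;
        while (end_threads < 2) {
                pthread_cond_signal(&cv);      /* like wakeup_one() */
                pthread_cond_wait(&cv, &lock); /* like rw_sleep()   */
        }
        pthread_mutex_unlock(&lock);

        pthread_join(td, NULL);
        printf("purge thread stopped\n");
        return (0);
}

The point preserved by the backout is that the worker re-checks the flag under
the same lock used for the sleep, so the unload path cannot miss the wakeup.
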
Modified:
  head/sys/net/pfvar.h
  head/sys/netpfil/pf/pf.c
  head/sys/netpfil/pf/pf_if.c
  head/sys/netpfil/pf/pf_ioctl.c
  head/sys/netpfil/pf/pf_norm.c
  head/sys/netpfil/pf/pf_table.c

Modified: head/sys/net/pfvar.h
==============================================================================
--- head/sys/net/pfvar.h        Thu Jan 22 00:52:34 2015        (r277518)
+++ head/sys/net/pfvar.h        Thu Jan 22 01:23:16 2015        (r277519)
@@ -829,6 +829,7 @@ typedef int pflog_packet_t(struct pfi_ki
     struct pf_ruleset *, struct pf_pdesc *, int);
 extern pflog_packet_t          *pflog_packet_ptr;
 
+#define        V_pf_end_threads        VNET(pf_end_threads)
 #endif /* _KERNEL */
 
 #define        PFSYNC_FLAG_SRCNODE     0x04
@@ -1494,7 +1495,7 @@ VNET_DECLARE(struct pf_altqqueue *,        pf_
 VNET_DECLARE(struct pf_rulequeue, pf_unlinked_rules);
 #define        V_pf_unlinked_rules     VNET(pf_unlinked_rules)
 
-void                            pf_vnet_initialize(void);
+void                            pf_initialize(void);
 void                            pf_mtag_initialize(void);
 void                            pf_mtag_cleanup(void);
 void                            pf_cleanup(void);
@@ -1586,7 +1587,7 @@ int       pf_match_addr_range(struct pf_addr *
            struct pf_addr *, sa_family_t);
 int    pf_match_port(u_int8_t, u_int16_t, u_int16_t, u_int16_t);
 
-void   pf_vnet_normalize_init(void);
+void   pf_normalize_init(void);
 void   pf_normalize_cleanup(void);
 int    pf_normalize_ip(struct mbuf **, int, struct pfi_kif *, u_short *,
            struct pf_pdesc *);
@@ -1648,7 +1649,7 @@ MALLOC_DECLARE(PFI_MTYPE);
 VNET_DECLARE(struct pfi_kif *,          pfi_all);
 #define        V_pfi_all                        VNET(pfi_all)
 
-void            pfi_vnet_initialize(void);
+void            pfi_initialize(void);
 void            pfi_cleanup(void);
 void            pfi_kif_ref(struct pfi_kif *);
 void            pfi_kif_unref(struct pfi_kif *);

Modified: head/sys/netpfil/pf/pf.c
==============================================================================
--- head/sys/netpfil/pf/pf.c    Thu Jan 22 00:52:34 2015        (r277518)
+++ head/sys/netpfil/pf/pf.c    Thu Jan 22 01:23:16 2015        (r277519)
@@ -151,7 +151,6 @@ static VNET_DEFINE(struct pf_send_head, 
 #define        V_pf_sendqueue  VNET(pf_sendqueue)
 
 static struct mtx pf_sendqueue_mtx;
-MTX_SYSINIT(pf_sendqueue_mtx, &pf_sendqueue_mtx, "pf send queue", MTX_DEF);
 #define        PF_SENDQ_LOCK()         mtx_lock(&pf_sendqueue_mtx)
 #define        PF_SENDQ_UNLOCK()       mtx_unlock(&pf_sendqueue_mtx)
 
@@ -173,15 +172,11 @@ static VNET_DEFINE(struct task, pf_overl
 #define        V_pf_overloadtask       VNET(pf_overloadtask)
 
 static struct mtx pf_overloadqueue_mtx;
-MTX_SYSINIT(pf_overloadqueue_mtx, &pf_overloadqueue_mtx,
-    "pf overload/flush queue", MTX_DEF);
 #define        PF_OVERLOADQ_LOCK()     mtx_lock(&pf_overloadqueue_mtx)
 #define        PF_OVERLOADQ_UNLOCK()   mtx_unlock(&pf_overloadqueue_mtx)
 
 VNET_DEFINE(struct pf_rulequeue, pf_unlinked_rules);
 struct mtx pf_unlnkdrules_mtx;
-MTX_SYSINIT(pf_unlnkdrules_mtx, &pf_unlnkdrules_mtx, "pf unlinked rules",
-    MTX_DEF);
 
 static VNET_DEFINE(uma_zone_t, pf_sources_z);
 #define        V_pf_sources_z  VNET(pf_sources_z)
@@ -295,6 +290,8 @@ static void          pf_route6(struct mbuf **, 
 
 int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);
 
+VNET_DECLARE(int, pf_end_threads);
+
 VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);
 
 #define        PACKET_LOOPED(pd)       ((pd)->pf_mtag &&                       \
@@ -731,7 +728,7 @@ pf_mtag_initialize()
 
 /* Per-vnet data storage structures initialization. */
 void
-pf_vnet_initialize()
+pf_initialize()
 {
        struct pf_keyhash       *kh;
        struct pf_idhash        *ih;
@@ -791,9 +788,13 @@ pf_vnet_initialize()
        STAILQ_INIT(&V_pf_sendqueue);
        SLIST_INIT(&V_pf_overloadqueue);
        TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, curvnet);
+       mtx_init(&pf_sendqueue_mtx, "pf send queue", NULL, MTX_DEF);
+       mtx_init(&pf_overloadqueue_mtx, "pf overload/flush queue", NULL,
+           MTX_DEF);
 
        /* Unlinked, but may be referenced rules. */
        TAILQ_INIT(&V_pf_unlinked_rules);
+       mtx_init(&pf_unlnkdrules_mtx, "pf unlinked rules", NULL, MTX_DEF);
 }
 
 void
@@ -836,6 +837,10 @@ pf_cleanup()
                free(pfse, M_PFTEMP);
        }
 
+       mtx_destroy(&pf_sendqueue_mtx);
+       mtx_destroy(&pf_overloadqueue_mtx);
+       mtx_destroy(&pf_unlnkdrules_mtx);
+
        uma_zdestroy(V_pf_sources_z);
        uma_zdestroy(V_pf_state_z);
        uma_zdestroy(V_pf_state_key_z);
@@ -1381,37 +1386,71 @@ pf_intr(void *v)
 }
 
 void
-pf_purge_thread(void *v __unused)
+pf_purge_thread(void *v)
 {
        u_int idx = 0;
-       VNET_ITERATOR_DECL(vnet_iter);
+
+       CURVNET_SET((struct vnet *)v);
 
        for (;;) {
-               tsleep(pf_purge_thread, PWAIT, "pftm", hz / 10);
-               VNET_LIST_RLOCK();
-               VNET_FOREACH(vnet_iter) {
-                       CURVNET_SET(vnet_iter);
-                       /* Process 1/interval fraction of the state table every run. */
-                       idx = pf_purge_expired_states(idx, pf_hashmask /
-                                   (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));
+               PF_RULES_RLOCK();
+               rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftm", hz / 10);
 
-                       /* Purge other expired types every PFTM_INTERVAL seconds. */
-                       if (idx == 0) {
-                               /*
-                                * Order is important:
-                                * - states and src nodes reference rules
-                                * - states and rules reference kifs
-                                */
-                               pf_purge_expired_fragments();
-                               pf_purge_expired_src_nodes();
-                               pf_purge_unlinked_rules();
-                               pfi_kif_purge();
-                       }
-                       CURVNET_RESTORE();
+               if (V_pf_end_threads) {
+                       /*
+                        * To cleanse up all kifs and rules we need
+                        * two runs: first one clears reference flags,
+                        * then pf_purge_expired_states() doesn't
+                        * raise them, and then second run frees.
+                        */
+                       PF_RULES_RUNLOCK();
+                       pf_purge_unlinked_rules();
+                       pfi_kif_purge();
+
+                       /*
+                        * Now purge everything.
+                        */
+                       pf_purge_expired_states(0, pf_hashmask);
+                       pf_purge_expired_fragments();
+                       pf_purge_expired_src_nodes();
+
+                       /*
+                        * Now all kifs & rules should be unreferenced,
+                        * thus should be successfully freed.
+                        */
+                       pf_purge_unlinked_rules();
+                       pfi_kif_purge();
+
+                       /*
+                        * Announce success and exit.
+                        */
+                       PF_RULES_RLOCK();
+                       V_pf_end_threads++;
+                       PF_RULES_RUNLOCK();
+                       wakeup(pf_purge_thread);
+                       kproc_exit(0);
+               }
+               PF_RULES_RUNLOCK();
+
+               /* Process 1/interval fraction of the state table every run. */
+               idx = pf_purge_expired_states(idx, pf_hashmask /
+                           (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));
+
+               /* Purge other expired types every PFTM_INTERVAL seconds. */
+               if (idx == 0) {
+                       /*
+                        * Order is important:
+                        * - states and src nodes reference rules
+                        * - states and rules reference kifs
+                        */
+                       pf_purge_expired_fragments();
+                       pf_purge_expired_src_nodes();
+                       pf_purge_unlinked_rules();
+                       pfi_kif_purge();
                }
-               VNET_LIST_RUNLOCK();
        }
        /* not reached */
+       CURVNET_RESTORE();
 }
 
 u_int32_t

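The locking hunks above also restore the original lifecycle of pf's global
mutexes: rather than being created once at boot via MTX_SYSINIT() (the style
removed here), they are again created in pf_initialize() with mtx_init() and
torn down in pf_cleanup() with mtx_destroy(). A minimal sketch of the two
styles for a hypothetical mutex, not a complete module; the names below are
illustrative only:

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/*
 * Style 1: the mutex is set up automatically at boot by a SYSINIT
 * (the style this backout removes from pf).
 */
static struct mtx example_sysinit_mtx;
MTX_SYSINIT(example_sysinit_mtx, &example_sysinit_mtx,
    "example sysinit mutex", MTX_DEF);

/*
 * Style 2: the mutex lifecycle is tied to explicit subsystem
 * init/cleanup routines (the style restored here for pf).
 */
static struct mtx example_mtx;

static void
example_initialize(void)
{
        mtx_init(&example_mtx, "example mutex", NULL, MTX_DEF);
}

static void
example_cleanup(void)
{
        mtx_destroy(&example_mtx);
}
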
Modified: head/sys/netpfil/pf/pf_if.c
==============================================================================
--- head/sys/netpfil/pf/pf_if.c Thu Jan 22 00:52:34 2015        (r277518)
+++ head/sys/netpfil/pf/pf_if.c Thu Jan 22 01:23:16 2015        (r277519)
@@ -102,13 +102,10 @@ MALLOC_DEFINE(PFI_MTYPE, "pf_ifnet", "pf
 LIST_HEAD(pfi_list, pfi_kif);
 static VNET_DEFINE(struct pfi_list, pfi_unlinked_kifs);
 #define        V_pfi_unlinked_kifs     VNET(pfi_unlinked_kifs)
-
 static struct mtx pfi_unlnkdkifs_mtx;
-MTX_SYSINIT(pfi_unlnkdkifs_mtx, &pfi_unlnkdkifs_mtx, "pf unlinked interfaces",
-    MTX_DEF);
 
 void
-pfi_vnet_initialize(void)
+pfi_initialize(void)
 {
        struct ifg_group *ifg;
        struct ifnet *ifp;
@@ -117,6 +114,9 @@ pfi_vnet_initialize(void)
        V_pfi_buffer_max = 64;
        V_pfi_buffer = malloc(V_pfi_buffer_max * sizeof(*V_pfi_buffer),
            PFI_MTYPE, M_WAITOK);
+
+       mtx_init(&pfi_unlnkdkifs_mtx, "pf unlinked interfaces", NULL, MTX_DEF);
+
        kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
        PF_RULES_WLOCK();
        V_pfi_all = pfi_kif_attach(kif, IFG_ALL);
@@ -129,20 +129,18 @@ pfi_vnet_initialize(void)
                pfi_attach_ifnet(ifp);
        IFNET_RUNLOCK();
 
-       if (IS_DEFAULT_VNET(curvnet)) {
-           pfi_attach_cookie = EVENTHANDLER_REGISTER(ifnet_arrival_event,
-               pfi_attach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
-           pfi_detach_cookie = EVENTHANDLER_REGISTER(ifnet_departure_event,
-               pfi_detach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
-           pfi_attach_group_cookie = EVENTHANDLER_REGISTER(group_attach_event,
-               pfi_attach_group_event, curvnet, EVENTHANDLER_PRI_ANY);
-           pfi_change_group_cookie = EVENTHANDLER_REGISTER(group_change_event,
-               pfi_change_group_event, curvnet, EVENTHANDLER_PRI_ANY);
-           pfi_detach_group_cookie = EVENTHANDLER_REGISTER(group_detach_event,
-               pfi_detach_group_event, curvnet, EVENTHANDLER_PRI_ANY);
-           pfi_ifaddr_event_cookie = EVENTHANDLER_REGISTER(ifaddr_event,
-               pfi_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY);
-       }
+       pfi_attach_cookie = EVENTHANDLER_REGISTER(ifnet_arrival_event,
+           pfi_attach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
+       pfi_detach_cookie = EVENTHANDLER_REGISTER(ifnet_departure_event,
+           pfi_detach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
+       pfi_attach_group_cookie = EVENTHANDLER_REGISTER(group_attach_event,
+           pfi_attach_group_event, curvnet, EVENTHANDLER_PRI_ANY);
+       pfi_change_group_cookie = EVENTHANDLER_REGISTER(group_change_event,
+           pfi_change_group_event, curvnet, EVENTHANDLER_PRI_ANY);
+       pfi_detach_group_cookie = EVENTHANDLER_REGISTER(group_detach_event,
+           pfi_detach_group_event, curvnet, EVENTHANDLER_PRI_ANY);
+       pfi_ifaddr_event_cookie = EVENTHANDLER_REGISTER(ifaddr_event,
+           pfi_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY);
 }
 
 void
@@ -168,6 +166,8 @@ pfi_cleanup(void)
                free(p, PFI_MTYPE);
        }
 
+       mtx_destroy(&pfi_unlnkdkifs_mtx);
+
        free(V_pfi_buffer, PFI_MTYPE);
 }
 

Modified: head/sys/netpfil/pf/pf_ioctl.c
==============================================================================
--- head/sys/netpfil/pf/pf_ioctl.c      Thu Jan 22 00:52:34 2015        (r277518)
+++ head/sys/netpfil/pf/pf_ioctl.c      Thu Jan 22 01:23:16 2015        (r277519)
@@ -87,7 +87,7 @@ __FBSDID("$FreeBSD$");
 #include <altq/altq.h>
 #endif
 
-static int              pf_vnet_init(void);
+static int              pfattach(void);
 static struct pf_pool  *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
                            u_int8_t, u_int8_t, u_int8_t);
 
@@ -189,6 +189,7 @@ static struct cdevsw pf_cdevsw = {
 
 static volatile VNET_DEFINE(int, pf_pfil_hooked);
 #define V_pf_pfil_hooked       VNET(pf_pfil_hooked)
+VNET_DEFINE(int,               pf_end_threads);
 
 struct rwlock                  pf_rules_lock;
 struct sx                      pf_ioctl_lock;
@@ -204,20 +205,17 @@ pfsync_defer_t                    *pfsync_defer_ptr = NUL
 pflog_packet_t                 *pflog_packet_ptr = NULL;
 
 static int
-pf_vnet_init(void)
+pfattach(void)
 {
        u_int32_t *my_timeout = V_pf_default_rule.timeout;
        int error;
 
        if (IS_DEFAULT_VNET(curvnet))
                pf_mtag_initialize();
-       TAILQ_INIT(&V_pf_tags);
-       TAILQ_INIT(&V_pf_qids);
-
-       pf_vnet_initialize();
+       pf_initialize();
        pfr_initialize();
-       pfi_vnet_initialize();
-       pf_vnet_normalize_init();
+       pfi_initialize();
+       pf_normalize_init();
 
        V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
        V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
@@ -278,13 +276,10 @@ pf_vnet_init(void)
        for (int i = 0; i < SCNT_MAX; i++)
                V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
 
-       if (IS_DEFAULT_VNET(curvnet)) {
-           if ((error = kproc_create(pf_purge_thread, curvnet, NULL, 0, 0,
-               "pf purge")) != 0) {
-                   /* XXXGL: leaked all above. */
-                   return (error);
-           }
-       }
+       if ((error = kproc_create(pf_purge_thread, curvnet, NULL, 0, 0,
+           "pf purge")) != 0)
+               /* XXXGL: leaked all above. */
+               return (error);
        if ((error = swi_add(NULL, "pf send", pf_intr, curvnet, SWI_NET,
            INTR_MPSAFE, &V_pf_swi_cookie)) != 0)
                /* XXXGL: leaked all above. */
@@ -3720,11 +3715,27 @@ dehook_pf(void)
 static int
 pf_load(void)
 {
+       int error;
+
+       VNET_ITERATOR_DECL(vnet_iter);
+
+       VNET_LIST_RLOCK();
+       VNET_FOREACH(vnet_iter) {
+               CURVNET_SET(vnet_iter);
+               V_pf_pfil_hooked = 0;
+               V_pf_end_threads = 0;
+               TAILQ_INIT(&V_pf_tags);
+               TAILQ_INIT(&V_pf_qids);
+               CURVNET_RESTORE();
+       }
+       VNET_LIST_RUNLOCK();
 
        rw_init(&pf_rules_lock, "pf rulesets");
        sx_init(&pf_ioctl_lock, "pf ioctl");
 
        pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
+       if ((error = pfattach()) != 0)
+               return (error);
 
        return (0);
 }
@@ -3748,6 +3759,11 @@ pf_unload(void)
        }
        PF_RULES_WLOCK();
        shutdown_pf();
+       V_pf_end_threads = 1;
+       while (V_pf_end_threads < 2) {
+               wakeup_one(pf_purge_thread);
+               rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftmo", 0);
+       }
        pf_normalize_cleanup();
        pfi_cleanup();
        pfr_cleanup();
@@ -3797,5 +3813,3 @@ static moduledata_t pf_mod = {
 
 DECLARE_MODULE(pf, pf_mod, SI_SUB_PSEUDO, SI_ORDER_FIRST);
 MODULE_VERSION(pf, PF_MODVER);
-VNET_SYSINIT(pf_vnet_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY - 255,
-           pf_vnet_init, NULL);

Modified: head/sys/netpfil/pf/pf_norm.c
==============================================================================
--- head/sys/netpfil/pf/pf_norm.c       Thu Jan 22 00:52:34 2015        (r277518)
+++ head/sys/netpfil/pf/pf_norm.c       Thu Jan 22 01:23:16 2015        (r277519)
@@ -33,7 +33,6 @@ __FBSDID("$FreeBSD$");
 #include "opt_pf.h"
 
 #include <sys/param.h>
-#include <sys/kernel.h>
 #include <sys/lock.h>
 #include <sys/mbuf.h>
 #include <sys/mutex.h>
@@ -93,7 +92,6 @@ struct pf_fragment {
 };
 
 static struct mtx pf_frag_mtx;
-MTX_SYSINIT(pf_frag_mtx, &pf_frag_mtx, "pf fragments", MTX_DEF);
 #define PF_FRAG_LOCK()         mtx_lock(&pf_frag_mtx)
 #define PF_FRAG_UNLOCK()       mtx_unlock(&pf_frag_mtx)
 #define PF_FRAG_ASSERT()       mtx_assert(&pf_frag_mtx, MA_OWNED)
@@ -148,7 +146,7 @@ static void          pf_scrub_ip6(struct mbuf *
 } while(0)
 
 void
-pf_vnet_normalize_init(void)
+pf_normalize_init(void)
 {
 
        V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
@@ -163,6 +161,9 @@ pf_vnet_normalize_init(void)
        V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
        uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
        uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");
+
+       mtx_init(&pf_frag_mtx, "pf fragments", NULL, MTX_DEF);
+
        TAILQ_INIT(&V_pf_fragqueue);
        TAILQ_INIT(&V_pf_cachequeue);
 }
@@ -174,6 +175,8 @@ pf_normalize_cleanup(void)
        uma_zdestroy(V_pf_state_scrub_z);
        uma_zdestroy(V_pf_frent_z);
        uma_zdestroy(V_pf_frag_z);
+
+       mtx_destroy(&pf_frag_mtx);
 }
 
 static int

Modified: head/sys/netpfil/pf/pf_table.c
==============================================================================
--- head/sys/netpfil/pf/pf_table.c      Thu Jan 22 00:52:34 2015        (r277518)
+++ head/sys/netpfil/pf/pf_table.c      Thu Jan 22 01:23:16 2015        (r277519)
@@ -184,13 +184,9 @@ static struct pfr_kentry
 static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
 static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
 
-VNET_DEFINE(struct pfr_ktablehead, pfr_ktables);
-#define V_pfr_ktables          VNET(pfr_ktables)
-
+struct pfr_ktablehead   pfr_ktables;
 struct pfr_table        pfr_nulltable;
-
-VNET_DEFINE(int, pfr_ktable_cnt);
-#define V_pfr_ktable_cnt       VNET(pfr_ktable_cnt)
+int                     pfr_ktable_cnt;
 
 void
 pfr_initialize(void)
@@ -1087,7 +1083,7 @@ pfr_clr_tables(struct pfr_table *filter,
                return (ENOENT);
 
        SLIST_INIT(&workq);
-       RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
+       RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
                if (pfr_skip_table(filter, p, flags))
                        continue;
                if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
@@ -1122,7 +1118,7 @@ pfr_add_tables(struct pfr_table *tbl, in
                    flags & PFR_FLAG_USERIOCTL))
                        senderr(EINVAL);
                key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
-               p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
+               p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
                if (p == NULL) {
                        p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
                        if (p == NULL)
@@ -1138,7 +1134,7 @@ pfr_add_tables(struct pfr_table *tbl, in
 
                        /* find or create root table */
                        bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
-                       r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
+                       r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
                        if (r != NULL) {
                                p->pfrkt_root = r;
                                goto _skip;
@@ -1194,7 +1190,7 @@ pfr_del_tables(struct pfr_table *tbl, in
                if (pfr_validate_table(&key.pfrkt_t, 0,
                    flags & PFR_FLAG_USERIOCTL))
                        return (EINVAL);
-               p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
+               p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
                if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
                        SLIST_FOREACH(q, &workq, pfrkt_workq)
                                if (!pfr_ktable_compare(p, q))
@@ -1233,7 +1229,7 @@ pfr_get_tables(struct pfr_table *filter,
                *size = n;
                return (0);
        }
-       RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
+       RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
                if (pfr_skip_table(filter, p, flags))
                        continue;
                if (n-- <= 0)
@@ -1268,7 +1264,7 @@ pfr_get_tstats(struct pfr_table *filter,
                return (0);
        }
        SLIST_INIT(&workq);
-       RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
+       RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
                if (pfr_skip_table(filter, p, flags))
                        continue;
                if (n-- <= 0)
@@ -1300,7 +1296,7 @@ pfr_clr_tstats(struct pfr_table *tbl, in
                bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
                if (pfr_validate_table(&key.pfrkt_t, 0, 0))
                        return (EINVAL);
-               p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
+               p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
                if (p != NULL) {
                        SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
                        xzero++;
@@ -1332,7 +1328,7 @@ pfr_set_tflags(struct pfr_table *tbl, in
                if (pfr_validate_table(&key.pfrkt_t, 0,
                    flags & PFR_FLAG_USERIOCTL))
                        return (EINVAL);
-               p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
+               p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
                if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
                        p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
                            ~clrflag;
@@ -1374,7 +1370,7 @@ pfr_ina_begin(struct pfr_table *trs, u_i
        if (rs == NULL)
                return (ENOMEM);
        SLIST_INIT(&workq);
-       RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
+       RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
                if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
                    pfr_skip_table(trs, p, 0))
                        continue;
@@ -1419,7 +1415,7 @@ pfr_ina_define(struct pfr_table *tbl, st
                return (EBUSY);
        tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
        SLIST_INIT(&tableq);
-       kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
+       kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
        if (kt == NULL) {
                kt = pfr_create_ktable(tbl, 0, 1);
                if (kt == NULL)
@@ -1432,7 +1428,7 @@ pfr_ina_define(struct pfr_table *tbl, st
                /* find or create root table */
                bzero(&key, sizeof(key));
                strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
-               rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
+               rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
                if (rt != NULL) {
                        kt->pfrkt_root = rt;
                        goto _skip;
@@ -1509,7 +1505,7 @@ pfr_ina_rollback(struct pfr_table *trs, 
        if (rs == NULL || !rs->topen || ticket != rs->tticket)
                return (0);
        SLIST_INIT(&workq);
-       RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
+       RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
                if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
                    pfr_skip_table(trs, p, 0))
                        continue;
@@ -1545,7 +1541,7 @@ pfr_ina_commit(struct pfr_table *trs, u_
                return (EBUSY);
 
        SLIST_INIT(&workq);
-       RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
+       RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
                if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
                    pfr_skip_table(trs, p, 0))
                        continue;
@@ -1691,7 +1687,7 @@ pfr_table_count(struct pfr_table *filter
        PF_RULES_ASSERT();
 
        if (flags & PFR_FLAG_ALLRSETS)
-               return (V_pfr_ktable_cnt);
+               return (pfr_ktable_cnt);
        if (filter->pfrt_anchor[0]) {
                rs = pf_find_ruleset(filter->pfrt_anchor);
                return ((rs != NULL) ? rs->tables : -1);
@@ -1724,8 +1720,8 @@ pfr_insert_ktable(struct pfr_ktable *kt)
 
        PF_RULES_WASSERT();
 
-       RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
-       V_pfr_ktable_cnt++;
+       RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
+       pfr_ktable_cnt++;
        if (kt->pfrkt_root != NULL)
                if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
                        pfr_setflags_ktable(kt->pfrkt_root,
@@ -1756,14 +1752,14 @@ pfr_setflags_ktable(struct pfr_ktable *k
        if (!(newf & PFR_TFLAG_ACTIVE))
                newf &= ~PFR_TFLAG_USRMASK;
        if (!(newf & PFR_TFLAG_SETMASK)) {
-               RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
+               RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
                if (kt->pfrkt_root != NULL)
                        if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
                                pfr_setflags_ktable(kt->pfrkt_root,
                                    kt->pfrkt_root->pfrkt_flags &
                                        ~PFR_TFLAG_REFDANCHOR);
                pfr_destroy_ktable(kt, 1);
-               V_pfr_ktable_cnt--;
+               pfr_ktable_cnt--;
                return;
        }
        if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
@@ -1884,7 +1880,7 @@ static struct pfr_ktable *
 pfr_lookup_table(struct pfr_table *tbl)
 {
        /* struct pfr_ktable start like a struct pfr_table */
-       return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
+       return (RB_FIND(pfr_ktablehead, &pfr_ktables,
            (struct pfr_ktable *)tbl));
 }
 