Author: kp
Date: Fri Mar 29 14:34:51 2019
New Revision: 345692
URL: https://svnweb.freebsd.org/changeset/base/345692

Log:
  MFC r345177:
  
  pf: Use counter(9) in pf tables.
  
  The counters of pf tables are updated outside the rule lock. That means state
  updates might overwrite each other. Furthermore allocation and
  freeing of counters happens outside the lock as well.
  
  Use counter(9) for the counters, and always allocate the counter table
  element, so that the race condition cannot happen any more.
  
  PR:           230619
  Submitted by: Kajetan Staszkiewicz <vegeta@tuxpowered.net>

Modified:
  stable/12/sys/net/pfvar.h
  stable/12/sys/netpfil/pf/pf_table.c
Directory Properties:
  stable/12/   (props changed)

Modified: stable/12/sys/net/pfvar.h
==============================================================================
--- stable/12/sys/net/pfvar.h   Fri Mar 29 14:34:50 2019        (r345691)
+++ stable/12/sys/net/pfvar.h   Fri Mar 29 14:34:51 2019        (r345692)
@@ -1021,6 +1021,17 @@ struct pfr_tstats {
        int              pfrts_cnt;
        int              pfrts_refcnt[PFR_REFCNT_MAX];
 };
+
+struct pfr_ktstats {
+       struct pfr_table pfrts_t;
+       counter_u64_t    pfrkts_packets[PFR_DIR_MAX][PFR_OP_TABLE_MAX];
+       counter_u64_t    pfrkts_bytes[PFR_DIR_MAX][PFR_OP_TABLE_MAX];
+       counter_u64_t    pfrkts_match;
+       counter_u64_t    pfrkts_nomatch;
+       long             pfrkts_tzero;
+       int              pfrkts_cnt;
+       int              pfrkts_refcnt[PFR_REFCNT_MAX];
+};
 #define        pfrts_name      pfrts_t.pfrt_name
 #define pfrts_flags    pfrts_t.pfrt_flags
 
@@ -1034,8 +1045,9 @@ union sockaddr_union {
 #endif /* _SOCKADDR_UNION_DEFINED */
 
 struct pfr_kcounters {
-       u_int64_t                pfrkc_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
-       u_int64_t                pfrkc_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
+       counter_u64_t            pfrkc_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
+       counter_u64_t            pfrkc_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
+       long                     pfrkc_tzero;
 };
 
 SLIST_HEAD(pfr_kentryworkq, pfr_kentry);
@@ -1043,8 +1055,7 @@ struct pfr_kentry {
        struct radix_node        pfrke_node[2];
        union sockaddr_union     pfrke_sa;
        SLIST_ENTRY(pfr_kentry)  pfrke_workq;
-       struct pfr_kcounters    *pfrke_counters;
-       long                     pfrke_tzero;
+       struct pfr_kcounters     pfrke_counters;
        u_int8_t                 pfrke_af;
        u_int8_t                 pfrke_net;
        u_int8_t                 pfrke_not;
@@ -1054,7 +1065,7 @@ struct pfr_kentry {
 SLIST_HEAD(pfr_ktableworkq, pfr_ktable);
 RB_HEAD(pfr_ktablehead, pfr_ktable);
 struct pfr_ktable {
-       struct pfr_tstats        pfrkt_ts;
+       struct pfr_ktstats       pfrkt_kts;
        RB_ENTRY(pfr_ktable)     pfrkt_tree;
        SLIST_ENTRY(pfr_ktable)  pfrkt_workq;
        struct radix_node_head  *pfrkt_ip4;
@@ -1065,18 +1076,18 @@ struct pfr_ktable {
        long                     pfrkt_larg;
        int                      pfrkt_nflags;
 };
-#define pfrkt_t                pfrkt_ts.pfrts_t
+#define pfrkt_t                pfrkt_kts.pfrts_t
 #define pfrkt_name     pfrkt_t.pfrt_name
 #define pfrkt_anchor   pfrkt_t.pfrt_anchor
 #define pfrkt_ruleset  pfrkt_t.pfrt_ruleset
 #define pfrkt_flags    pfrkt_t.pfrt_flags
-#define pfrkt_cnt      pfrkt_ts.pfrts_cnt
-#define pfrkt_refcnt   pfrkt_ts.pfrts_refcnt
-#define pfrkt_packets  pfrkt_ts.pfrts_packets
-#define pfrkt_bytes    pfrkt_ts.pfrts_bytes
-#define pfrkt_match    pfrkt_ts.pfrts_match
-#define pfrkt_nomatch  pfrkt_ts.pfrts_nomatch
-#define pfrkt_tzero    pfrkt_ts.pfrts_tzero
+#define pfrkt_cnt      pfrkt_kts.pfrkts_cnt
+#define pfrkt_refcnt   pfrkt_kts.pfrkts_refcnt
+#define pfrkt_packets  pfrkt_kts.pfrkts_packets
+#define pfrkt_bytes    pfrkt_kts.pfrkts_bytes
+#define pfrkt_match    pfrkt_kts.pfrkts_match
+#define pfrkt_nomatch  pfrkt_kts.pfrkts_nomatch
+#define pfrkt_tzero    pfrkt_kts.pfrkts_tzero
 
 /* keep synced with pfi_kif, used in RB_FIND */
 struct pfi_kif_cmp {

Modified: stable/12/sys/netpfil/pf/pf_table.c
==============================================================================
--- stable/12/sys/netpfil/pf/pf_table.c Fri Mar 29 14:34:50 2019        
(r345691)
+++ stable/12/sys/netpfil/pf/pf_table.c Fri Mar 29 14:34:51 2019        
(r345692)
@@ -113,6 +113,7 @@ struct pfr_walktree {
                struct pfi_dynaddr      *pfrw1_dyn;
        }        pfrw_1;
        int      pfrw_free;
+       int      pfrw_flags;
 };
 #define        pfrw_addr       pfrw_1.pfrw1_addr
 #define        pfrw_astats     pfrw_1.pfrw1_astats
@@ -126,15 +127,16 @@ struct pfr_walktree {
 static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_z);
 #define        V_pfr_kentry_z          VNET(pfr_kentry_z)
-VNET_DEFINE_STATIC(uma_zone_t, pfr_kcounters_z);
-#define        V_pfr_kcounters_z       VNET(pfr_kcounters_z)
 
 static struct pf_addr   pfr_ffaddr = {
        .addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
 };
 
+static void             pfr_copyout_astats(struct pfr_astats *,
+                           const struct pfr_kentry *,
+                           const struct pfr_walktree *);
 static void             pfr_copyout_addr(struct pfr_addr *,
-                           struct pfr_kentry *ke);
+                           const struct pfr_kentry *ke);
 static int              pfr_validate_addr(struct pfr_addr *);
 static void             pfr_enqueue_addrs(struct pfr_ktable *,
                            struct pfr_kentryworkq *, int *, int);
@@ -142,8 +144,12 @@ static void                 pfr_mark_addrs(struct 
pfr_ktable *);
 static struct pfr_kentry
                        *pfr_lookup_addr(struct pfr_ktable *,
                            struct pfr_addr *, int);
+static bool             pfr_create_kentry_counter(struct pfr_kcounters *,
+                           int, int);
 static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *);
 static void             pfr_destroy_kentries(struct pfr_kentryworkq *);
+static void             pfr_destroy_kentry_counter(struct pfr_kcounters *,
+                           int, int);
 static void             pfr_destroy_kentry(struct pfr_kentry *);
 static void             pfr_insert_kentries(struct pfr_ktable *,
                            struct pfr_kentryworkq *, long);
@@ -202,9 +208,6 @@ pfr_initialize(void)
        V_pfr_kentry_z = uma_zcreate("pf table entries",
            sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
            0);
-       V_pfr_kcounters_z = uma_zcreate("pf table counters",
-           sizeof(struct pfr_kcounters), NULL, NULL, NULL, NULL,
-           UMA_ALIGN_PTR, 0);
        V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
        V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
 }
@@ -214,7 +217,6 @@ pfr_cleanup(void)
 {
 
        uma_zdestroy(V_pfr_kentry_z);
-       uma_zdestroy(V_pfr_kcounters_z);
 }
 
 int
@@ -608,6 +610,13 @@ pfr_get_astats(struct pfr_table *tbl, struct pfr_astat
        w.pfrw_op = PFRW_GET_ASTATS;
        w.pfrw_astats = addr;
        w.pfrw_free = kt->pfrkt_cnt;
+       /*
+        * Flags below are for backward compatibility. It was possible to have
+        * a table without per-entry counters. Now they are always allocated,
+        * we just discard data when reading it if table is not configured to
+        * have counters.
+        */
+       w.pfrw_flags = kt->pfrkt_flags;
        rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
        if (!rv)
                rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
@@ -774,10 +783,30 @@ pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr
        return (ke);
 }
 
+static bool
+pfr_create_kentry_counter(struct pfr_kcounters *kc, int pfr_dir, int pfr_op)
+{
+       kc->pfrkc_packets[pfr_dir][pfr_op] = counter_u64_alloc(M_NOWAIT);
+       if (! kc->pfrkc_packets[pfr_dir][pfr_op])
+               return (false);
+
+       kc->pfrkc_bytes[pfr_dir][pfr_op] = counter_u64_alloc(M_NOWAIT);
+       if (! kc->pfrkc_bytes[pfr_dir][pfr_op]) {
+               /* Previous allocation will be freed through
+                * pfr_destroy_kentry() */
+               return (false);
+       }
+
+       kc->pfrkc_tzero = 0;
+
+       return (true);
+}
+
 static struct pfr_kentry *
 pfr_create_kentry(struct pfr_addr *ad)
 {
        struct pfr_kentry       *ke;
+       int pfr_dir, pfr_op;
 
        ke =  uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
        if (ke == NULL)
@@ -790,6 +819,14 @@ pfr_create_kentry(struct pfr_addr *ad)
        ke->pfrke_af = ad->pfra_af;
        ke->pfrke_net = ad->pfra_net;
        ke->pfrke_not = ad->pfra_not;
+       for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
+               for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++) {
+                       if (! pfr_create_kentry_counter(&ke->pfrke_counters,
+                           pfr_dir, pfr_op)) {
+                               pfr_destroy_kentry(ke);
+                               return (NULL);
+                       }
+               }
        return (ke);
 }
 
@@ -805,10 +842,22 @@ pfr_destroy_kentries(struct pfr_kentryworkq *workq)
 }
 
 static void
+pfr_destroy_kentry_counter(struct pfr_kcounters *kc, int pfr_dir, int pfr_op)
+{
+       counter_u64_free(kc->pfrkc_packets[pfr_dir][pfr_op]);
+       counter_u64_free(kc->pfrkc_bytes[pfr_dir][pfr_op]);
+}
+
+static void
 pfr_destroy_kentry(struct pfr_kentry *ke)
 {
-       if (ke->pfrke_counters)
-               uma_zfree(V_pfr_kcounters_z, ke->pfrke_counters);
+       int pfr_dir, pfr_op;
+
+       for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
+               for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++)
+                       pfr_destroy_kentry_counter(&ke->pfrke_counters,
+                           pfr_dir, pfr_op);
+
        uma_zfree(V_pfr_kentry_z, ke);
 }
 
@@ -826,7 +875,7 @@ pfr_insert_kentries(struct pfr_ktable *kt,
                            "(code=%d).\n", rv);
                        break;
                }
-               p->pfrke_tzero = tzero;
+               p->pfrke_counters.pfrkc_tzero = tzero;
                n++;
        }
        kt->pfrkt_cnt += n;
@@ -849,7 +898,7 @@ pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_ad
        if (rv)
                return (rv);
 
-       p->pfrke_tzero = tzero;
+       p->pfrke_counters.pfrkc_tzero = tzero;
        kt->pfrkt_cnt++;
 
        return (0);
@@ -884,15 +933,20 @@ static void
 pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
 {
        struct pfr_kentry       *p;
+       int                      pfr_dir, pfr_op;
 
        SLIST_FOREACH(p, workq, pfrke_workq) {
                if (negchange)
                        p->pfrke_not = !p->pfrke_not;
-               if (p->pfrke_counters) {
-                       uma_zfree(V_pfr_kcounters_z, p->pfrke_counters);
-                       p->pfrke_counters = NULL;
+               for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
+                       for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++) {
+                               counter_u64_zero(p->pfrke_counters.
+                                       pfrkc_packets[pfr_dir][pfr_op]);
+                               counter_u64_zero(p->pfrke_counters.
+                                       pfrkc_bytes[pfr_dir][pfr_op]);
+                       }
                }
-               p->pfrke_tzero = tzero;
+               p->pfrke_counters.pfrkc_tzero = tzero;
        }
 }
 
@@ -981,7 +1035,7 @@ pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_k
 }
 
 static void
-pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
+pfr_copyout_addr(struct pfr_addr *ad, const struct pfr_kentry *ke)
 {
        bzero(ad, sizeof(*ad));
        if (ke == NULL)
@@ -995,6 +1049,33 @@ pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentr
                ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
 }
 
+static void
+pfr_copyout_astats(struct pfr_astats *as, const struct pfr_kentry *ke,
+    const struct pfr_walktree *w)
+{
+       int dir, op;
+       const struct pfr_kcounters *kc = &ke->pfrke_counters;
+
+       pfr_copyout_addr(&as->pfras_a, ke);
+       as->pfras_tzero = kc->pfrkc_tzero;
+
+       if (! (w->pfrw_flags & PFR_TFLAG_COUNTERS)) {
+               bzero(as->pfras_packets, sizeof(as->pfras_packets));
+               bzero(as->pfras_bytes, sizeof(as->pfras_bytes));
+               as->pfras_a.pfra_fback = PFR_FB_NOCOUNT;
+               return;
+       }
+
+       for (dir = 0; dir < PFR_DIR_MAX; dir ++) {
+               for (op = 0; op < PFR_OP_ADDR_MAX; op ++) {
+                       as->pfras_packets[dir][op] =
+                           counter_u64_fetch(kc->pfrkc_packets[dir][op]);
+                       as->pfras_bytes[dir][op] =
+                           counter_u64_fetch(kc->pfrkc_bytes[dir][op]);
+               }
+       }
+}
+
 static int
 pfr_walktree(struct radix_node *rn, void *arg)
 {
@@ -1023,20 +1104,8 @@ pfr_walktree(struct radix_node *rn, void *arg)
                if (w->pfrw_free-- > 0) {
                        struct pfr_astats as;
 
-                       pfr_copyout_addr(&as.pfras_a, ke);
+                       pfr_copyout_astats(&as, ke, w);
 
-                       if (ke->pfrke_counters) {
-                               bcopy(ke->pfrke_counters->pfrkc_packets,
-                                   as.pfras_packets, sizeof(as.pfras_packets));
-                               bcopy(ke->pfrke_counters->pfrkc_bytes,
-                                   as.pfras_bytes, sizeof(as.pfras_bytes));
-                       } else {
-                               bzero(as.pfras_packets, 
sizeof(as.pfras_packets));
-                               bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
-                               as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
-                       }
-                       as.pfras_tzero = ke->pfrke_tzero;
-
                        bcopy(&as, w->pfrw_astats, sizeof(as));
                        w->pfrw_astats++;
                }
@@ -1260,6 +1329,7 @@ pfr_get_tstats(struct pfr_table *filter, struct pfr_ts
        struct pfr_ktableworkq   workq;
        int                      n, nn;
        long                     tzero = time_second;
+       int                      pfr_dir, pfr_op;
 
        /* XXX PFR_FLAG_CLSTATS disabled */
        ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
@@ -1278,7 +1348,25 @@ pfr_get_tstats(struct pfr_table *filter, struct pfr_ts
                        continue;
                if (n-- <= 0)
                        continue;
-               bcopy(&p->pfrkt_ts, tbl++, sizeof(*tbl));
+               bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t,
+                   sizeof(struct pfr_table));
+               for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
+                       for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
+                               tbl->pfrts_packets[pfr_dir][pfr_op] =
+                                   counter_u64_fetch(
+                                       p->pfrkt_packets[pfr_dir][pfr_op]);
+                               tbl->pfrts_bytes[pfr_dir][pfr_op] =
+                                   counter_u64_fetch(
+                                       p->pfrkt_bytes[pfr_dir][pfr_op]);
+                       }
+               }
+               tbl->pfrts_match = counter_u64_fetch(p->pfrkt_match);
+               tbl->pfrts_nomatch = counter_u64_fetch(p->pfrkt_nomatch);
+               tbl->pfrts_tzero = p->pfrkt_tzero;
+               tbl->pfrts_cnt = p->pfrkt_cnt;
+               for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++)
+                       tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op];
+               tbl++;
                SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
        }
        if (flags & PFR_FLAG_CLSTATS)
@@ -1612,7 +1700,7 @@ pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
                                q->pfrke_mark = 1;
                                SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
                        } else {
-                               p->pfrke_tzero = tzero;
+                               p->pfrke_counters.pfrkc_tzero = tzero;
                                SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
                        }
                }
@@ -1796,14 +1884,20 @@ static void
 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
 {
        struct pfr_kentryworkq   addrq;
+       int                      pfr_dir, pfr_op;
 
        if (recurse) {
                pfr_enqueue_addrs(kt, &addrq, NULL, 0);
                pfr_clstats_kentries(&addrq, tzero, 0);
        }
-       bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
-       bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
-       kt->pfrkt_match = kt->pfrkt_nomatch = 0;
+       for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
+               for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
+                       counter_u64_zero(kt->pfrkt_packets[pfr_dir][pfr_op]);
+                       counter_u64_zero(kt->pfrkt_bytes[pfr_dir][pfr_op]);
+               }
+       }
+       counter_u64_zero(kt->pfrkt_match);
+       counter_u64_zero(kt->pfrkt_nomatch);
        kt->pfrkt_tzero = tzero;
 }
 
@@ -1812,6 +1906,7 @@ pfr_create_ktable(struct pfr_table *tbl, long tzero, i
 {
        struct pfr_ktable       *kt;
        struct pf_ruleset       *rs;
+       int                      pfr_dir, pfr_op;
 
        PF_RULES_WASSERT();
 
@@ -1830,6 +1925,34 @@ pfr_create_ktable(struct pfr_table *tbl, long tzero, i
                rs->tables++;
        }
 
+       for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
+               for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
+                       kt->pfrkt_packets[pfr_dir][pfr_op] =
+                           counter_u64_alloc(M_NOWAIT);
+                       if (! kt->pfrkt_packets[pfr_dir][pfr_op]) {
+                               pfr_destroy_ktable(kt, 0);
+                               return (NULL);
+                       }
+                       kt->pfrkt_bytes[pfr_dir][pfr_op] =
+                           counter_u64_alloc(M_NOWAIT);
+                       if (! kt->pfrkt_bytes[pfr_dir][pfr_op]) {
+                               pfr_destroy_ktable(kt, 0);
+                               return (NULL);
+                       }
+               }
+       }
+       kt->pfrkt_match = counter_u64_alloc(M_NOWAIT);
+       if (! kt->pfrkt_match) {
+               pfr_destroy_ktable(kt, 0);
+               return (NULL);
+       }
+
+       kt->pfrkt_nomatch = counter_u64_alloc(M_NOWAIT);
+       if (! kt->pfrkt_nomatch) {
+               pfr_destroy_ktable(kt, 0);
+               return (NULL);
+       }
+
        if (!rn_inithead((void **)&kt->pfrkt_ip4,
            offsetof(struct sockaddr_in, sin_addr) * 8) ||
            !rn_inithead((void **)&kt->pfrkt_ip6,
@@ -1857,6 +1980,7 @@ static void
 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
 {
        struct pfr_kentryworkq   addrq;
+       int                      pfr_dir, pfr_op;
 
        if (flushaddr) {
                pfr_enqueue_addrs(kt, &addrq, NULL, 0);
@@ -1873,6 +1997,15 @@ pfr_destroy_ktable(struct pfr_ktable *kt, int flushadd
                kt->pfrkt_rs->tables--;
                pf_remove_if_empty_ruleset(kt->pfrkt_rs);
        }
+       for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
+               for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
+                       counter_u64_free(kt->pfrkt_packets[pfr_dir][pfr_op]);
+                       counter_u64_free(kt->pfrkt_bytes[pfr_dir][pfr_op]);
+               }
+       }
+       counter_u64_free(kt->pfrkt_match);
+       counter_u64_free(kt->pfrkt_nomatch);
+
        free(kt, M_PFTABLE);
 }
 
@@ -1941,9 +2074,9 @@ pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *
        }
        match = (ke && !ke->pfrke_not);
        if (match)
-               kt->pfrkt_match++;
+               counter_u64_add(kt->pfrkt_match, 1);
        else
-               kt->pfrkt_nomatch++;
+               counter_u64_add(kt->pfrkt_nomatch, 1);
        return (match);
 }
 
@@ -1998,17 +2131,14 @@ pfr_update_stats(struct pfr_ktable *kt, struct pf_addr
                            ("pfr_update_stats: assertion failed.\n"));
                op_pass = PFR_OP_XPASS;
        }
-       kt->pfrkt_packets[dir_out][op_pass]++;
-       kt->pfrkt_bytes[dir_out][op_pass] += len;
+       counter_u64_add(kt->pfrkt_packets[dir_out][op_pass], 1);
+       counter_u64_add(kt->pfrkt_bytes[dir_out][op_pass], len);
        if (ke != NULL && op_pass != PFR_OP_XPASS &&
            (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
-               if (ke->pfrke_counters == NULL)
-                       ke->pfrke_counters = uma_zalloc(V_pfr_kcounters_z,
-                           M_NOWAIT | M_ZERO);
-               if (ke->pfrke_counters != NULL) {
-                       ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
-                       ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += 
len;
-               }
+               counter_u64_add(ke->pfrke_counters.
+                   pfrkc_packets[dir_out][op_pass], 1);
+               counter_u64_add(ke->pfrke_counters.
+                   pfrkc_bytes[dir_out][op_pass], len);
        }
 }
 
@@ -2098,7 +2228,7 @@ pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct 
 _next_block:
        ke = pfr_kentry_byidx(kt, idx, af);
        if (ke == NULL) {
-               kt->pfrkt_nomatch++;
+               counter_u64_add(kt->pfrkt_nomatch, 1);
                return (1);
        }
        pfr_prepare_network(&umask, af, ke->pfrke_net);
@@ -2123,7 +2253,7 @@ _next_block:
                /* this is a single IP address - no possible nested block */
                PF_ACPY(counter, addr, af);
                *pidx = idx;
-               kt->pfrkt_match++;
+               counter_u64_add(kt->pfrkt_match, 1);
                return (0);
        }
        for (;;) {
@@ -2143,7 +2273,7 @@ _next_block:
                        /* lookup return the same block - perfect */
                        PF_ACPY(counter, addr, af);
                        *pidx = idx;
-                       kt->pfrkt_match++;
+                       counter_u64_add(kt->pfrkt_match, 1);
                        return (0);
                }
 
_______________________________________________
svn-src-all@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"

Reply via email to