Hi everyone, this mail includes a patch to store pf rules in a red-black tree. Currently they are stored in a linked list. My system, configured with 16000 rules, takes about 10 minutes to print them out using `pfctl -sr`. This patch decreases the time to 4 seconds. I was not able to measure a time increase for rule insertion. Inserting a lot of labels is still very slow; this has to be attacked separately.
diff --git sbin/pfctl/parse.y sbin/pfctl/parse.y
index 8a92a7e895c..c6f8629c690 100644
--- sbin/pfctl/parse.y
+++ sbin/pfctl/parse.y
@@ -5548,11 +5548,13 @@ mv_rules(struct pf_ruleset *src, struct pf_ruleset *dst)
{
struct pf_rule *r;
- TAILQ_FOREACH(r, src->rules.active.ptr, entries)
+ RB_FOREACH(r, pf_ruletree, src->rules.active.ptr) {
dst->anchor->match++;
- TAILQ_CONCAT(dst->rules.active.ptr, src->rules.active.ptr, entries);
+ RB_INSERT(pf_ruletree, dst->rules.active.ptr, r);
+ }
src->anchor->match = 0;
- TAILQ_CONCAT(dst->rules.inactive.ptr, src->rules.inactive.ptr, entries);
+ RB_FOREACH(r, pf_ruletree, src->rules.inactive.ptr)
+ RB_INSERT(pf_ruletree, dst->rules.inactive.ptr, r);
}
void
diff --git sbin/pfctl/pfctl.c sbin/pfctl/pfctl.c
index 9f14b955ca7..49aafbed4e5 100644
--- sbin/pfctl/pfctl.c
+++ sbin/pfctl/pfctl.c
@@ -1170,7 +1170,7 @@ pfctl_add_rule(struct pfctl *pf, struct pf_rule *r)
err(1, "calloc");
bcopy(r, rule, sizeof(*rule));
- TAILQ_INSERT_TAIL(rs->rules.active.ptr, rule, entries);
+ RB_INSERT(pf_ruletree, rs->rules.active.ptr, rule);
}
int
@@ -1409,7 +1409,7 @@ pfctl_check_qassignments(struct pf_ruleset *rs)
}
}
- TAILQ_FOREACH(r, rs->rules.active.ptr, entries) {
+ RB_FOREACH(r, pf_ruletree, rs->rules.active.ptr) {
if (r->anchor)
errs += pfctl_check_qassignments(&r->anchor->ruleset);
if (pfctl_leafqueue_check(r->qname) ||
@@ -1436,7 +1436,7 @@ pfctl_load_ruleset(struct pfctl *pf, char *path, struct pf_ruleset *rs,
snprintf(&path[len], PATH_MAX - len, "%s", pf->anchor->path);
if (depth) {
- if (TAILQ_FIRST(rs->rules.active.ptr) != NULL) {
+ if (!RB_EMPTY(rs->rules.active.ptr)) {
brace++;
if (pf->opts & PF_OPT_VERBOSE)
printf(" {\n");
@@ -1454,8 +1454,8 @@ pfctl_load_ruleset(struct pfctl *pf, char *path, struct pf_ruleset *rs,
if (pf->optimize)
pfctl_optimize_ruleset(pf, rs);
- while ((r = TAILQ_FIRST(rs->rules.active.ptr)) != NULL) {
- TAILQ_REMOVE(rs->rules.active.ptr, r, entries);
+ while ((r = RB_MIN(pf_ruletree, rs->rules.active.ptr)) != NULL) {
+ RB_REMOVE(pf_ruletree, rs->rules.active.ptr, r);
pfctl_expand_label_nr(r, rno);
rno++;
if ((error = pfctl_load_rule(pf, path, r, depth)))
diff --git sbin/pfctl/pfctl_optimize.c sbin/pfctl/pfctl_optimize.c
index dc93b882bef..d0ba5c2aca4 100644
--- sbin/pfctl/pfctl_optimize.c
+++ sbin/pfctl/pfctl_optimize.c
@@ -191,6 +191,7 @@ struct pf_rule_field {
PF_RULE_FIELD(src_nodes, DC),
PF_RULE_FIELD(nr, DC),
PF_RULE_FIELD(entries, DC),
+ PF_RULE_FIELD(usage, DC),
PF_RULE_FIELD(qid, DC),
PF_RULE_FIELD(pqid, DC),
PF_RULE_FIELD(anchor_relative, DC),
@@ -268,9 +269,9 @@ pfctl_optimize_ruleset(struct pfctl *pf, struct pf_ruleset *rs)
struct superblock *block;
struct pf_opt_rule *por;
struct pf_rule *r;
- struct pf_rulequeue *old_rules;
+ struct pf_ruletree *old_rules;
- if (TAILQ_EMPTY(rs->rules.active.ptr))
+ if (RB_EMPTY(rs->rules.active.ptr))
return (0);
DEBUG("optimizing ruleset \"%s\"", rs->anchor->path);
@@ -286,8 +287,8 @@ pfctl_optimize_ruleset(struct pfctl *pf, struct pf_ruleset *rs)
* XXX expanding the pf_opt_rule format throughout pfctl might allow
* us to avoid all this copying.
*/
- while ((r = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
- TAILQ_REMOVE(rs->rules.inactive.ptr, r, entries);
+ while ((r = RB_MIN(pf_ruletree, rs->rules.inactive.ptr)) != NULL) {
+ RB_REMOVE(pf_ruletree, rs->rules.inactive.ptr, r);
if ((por = calloc(1, sizeof(*por))) == NULL)
err(1, "calloc");
memcpy(&por->por_rule, r, sizeof(*r));
@@ -319,7 +320,7 @@ pfctl_optimize_ruleset(struct pfctl *pf, struct pf_ruleset *rs)
if ((r = calloc(1, sizeof(*r))) == NULL)
err(1, "calloc");
memcpy(r, &por->por_rule, sizeof(*r));
- TAILQ_INSERT_TAIL(rs->rules.active.ptr, r, entries);
+ RB_INSERT(pf_ruletree, rs->rules.active.ptr, r);
pf_opt_table_unref(por->por_src_tbl);
pf_opt_table_unref(por->por_dst_tbl);
free(por);
diff --git sys/net/if_pfsync.c sys/net/if_pfsync.c
index c78ca62766e..91c4fcc0493 100644
--- sys/net/if_pfsync.c
+++ sys/net/if_pfsync.c
@@ -557,7 +557,7 @@ pfsync_state_import(struct pfsync_state *sp, int flags)
if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
(flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) <
pf_main_ruleset.rules.active.rcount) {
- TAILQ_FOREACH(r, pf_main_ruleset.rules.active.ptr, entries)
+ RB_FOREACH(r, pf_ruletree, pf_main_ruleset.rules.active.ptr)
if (ntohl(sp->rule) == n++)
break;
} else
diff --git sys/net/pf.c sys/net/pf.c
index f23968f07c7..3143711d455 100644
--- sys/net/pf.c
+++ sys/net/pf.c
@@ -1790,21 +1790,21 @@ pf_print_flags(u_int8_t f)
addlog("W");
}
-#define PF_SET_SKIP_STEPS(i) \
- do { \
- while (head[i] != cur) { \
- head[i]->skip[i].ptr = cur; \
- head[i] = TAILQ_NEXT(head[i], entries); \
- } \
+#define PF_SET_SKIP_STEPS(i)					\
+ do { \
+ while (head[i] != cur) { \
+ head[i]->skip[i].ptr = cur; \
+ head[i] = RB_NEXT(pf_ruletree, rules, head[i]); \
+ } \
} while (0)
void
-pf_calc_skip_steps(struct pf_rulequeue *rules)
+pf_calc_skip_steps(struct pf_ruletree *rules)
{
struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
int i;
- cur = TAILQ_FIRST(rules);
+ cur = RB_MIN(pf_ruletree, rules);
prev = cur;
for (i = 0; i < PF_SKIP_COUNT; ++i)
head[i] = cur;
@@ -1836,7 +1836,7 @@ pf_calc_skip_steps(struct pf_rulequeue *rules)
PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
prev = cur;
- cur = TAILQ_NEXT(cur, entries);
+ cur = RB_NEXT(pf_ruletree, rules, cur);
}
for (i = 0; i < PF_SKIP_COUNT; ++i)
PF_SET_SKIP_STEPS(i);
@@ -3606,8 +3606,9 @@ pf_match_rule(struct pf_test_ctx *ctx, struct pf_ruleset *ruleset)
struct pf_rule *r;
struct pf_rule *save_a;
struct pf_ruleset *save_aruleset;
+ struct pf_ruletree *head = ruleset->rules.active.ptr;
- r = TAILQ_FIRST(ruleset->rules.active.ptr);
+ r = RB_MIN(pf_ruletree, head);
while (r != NULL) {
r->evaluations++;
PF_TEST_ATTRIB(
@@ -3634,26 +3635,26 @@ pf_match_rule(struct pf_test_ctx *ctx, struct pf_ruleset *ruleset)
case PF_VPROTO_FRAGMENT:
/* tcp/udp only. port_op always 0 in other cases */
PF_TEST_ATTRIB((r->src.port_op || r->dst.port_op),
- TAILQ_NEXT(r, entries));
+ RB_NEXT(pf_ruletree, head, r));
PF_TEST_ATTRIB((ctx->pd->proto == IPPROTO_TCP &&
r->flagset),
- TAILQ_NEXT(r, entries));
+ RB_NEXT(pf_ruletree, head, r));
/* icmp only. type/code always 0 in other cases */
PF_TEST_ATTRIB((r->type || r->code),
- TAILQ_NEXT(r, entries));
+ RB_NEXT(pf_ruletree, head, r));
/* tcp/udp only. {uid|gid}.op always 0 in other cases */
PF_TEST_ATTRIB((r->gid.op || r->uid.op),
- TAILQ_NEXT(r, entries));
+ RB_NEXT(pf_ruletree, head, r));
break;
case IPPROTO_TCP:
PF_TEST_ATTRIB(((r->flagset & ctx->th->th_flags) !=
r->flags),
- TAILQ_NEXT(r, entries));
+ RB_NEXT(pf_ruletree, head, r));
PF_TEST_ATTRIB((r->os_fingerprint != PF_OSFP_ANY &&
!pf_osfp_match(pf_osfp_fingerprint(ctx->pd),
r->os_fingerprint)),
- TAILQ_NEXT(r, entries));
+ RB_NEXT(pf_ruletree, head, r));
/* FALLTHROUGH */
case IPPROTO_UDP:
@@ -3672,14 +3673,14 @@ pf_match_rule(struct pf_test_ctx *ctx, struct pf_ruleset *ruleset)
pf_socket_lookup(ctx->pd), 1)) &&
!pf_match_uid(r->uid.op, r->uid.uid[0],
r->uid.uid[1], ctx->pd->lookup.uid)),
- TAILQ_NEXT(r, entries));
+ RB_NEXT(pf_ruletree, head, r));
/* tcp/udp only. gid.op always 0 in other cases */
PF_TEST_ATTRIB((r->gid.op && (ctx->pd->lookup.done ||
(ctx->pd->lookup.done =
pf_socket_lookup(ctx->pd), 1)) &&
!pf_match_gid(r->gid.op, r->gid.gid[0],
r->gid.gid[1], ctx->pd->lookup.gid)),
- TAILQ_NEXT(r, entries));
+ RB_NEXT(pf_ruletree, head, r));
break;
case IPPROTO_ICMP:
@@ -3687,16 +3688,16 @@ pf_match_rule(struct pf_test_ctx *ctx, struct pf_ruleset *ruleset)
/* icmp only. type always 0 in other cases */
PF_TEST_ATTRIB((r->type &&
r->type != ctx->icmptype + 1),
- TAILQ_NEXT(r, entries));
+ RB_NEXT(pf_ruletree, head, r));
/* icmp only. type always 0 in other cases */
PF_TEST_ATTRIB((r->code &&
r->code != ctx->icmpcode + 1),
- TAILQ_NEXT(r, entries));
+ RB_NEXT(pf_ruletree, head, r));
/* icmp only. don't create states on replies */
PF_TEST_ATTRIB((r->keep_state && !ctx->state_icmp &&
(r->rule_flag & PFRULE_STATESLOPPY) == 0 &&
ctx->icmp_dir != PF_IN),
- TAILQ_NEXT(r, entries));
+ RB_NEXT(pf_ruletree, head, r));
break;
default:
@@ -3705,28 +3706,28 @@ pf_match_rule(struct pf_test_ctx *ctx, struct pf_ruleset *ruleset)
PF_TEST_ATTRIB((r->rule_flag & PFRULE_FRAGMENT &&
ctx->pd->virtual_proto != PF_VPROTO_FRAGMENT),
- TAILQ_NEXT(r, entries));
+ RB_NEXT(pf_ruletree, head, r));
PF_TEST_ATTRIB((r->tos && !(r->tos == ctx->pd->tos)),
- TAILQ_NEXT(r, entries));
+ RB_NEXT(pf_ruletree, head, r));
PF_TEST_ATTRIB((r->prob &&
r->prob <= arc4random_uniform(UINT_MAX - 1) + 1),
- TAILQ_NEXT(r, entries));
+ RB_NEXT(pf_ruletree, head, r));
PF_TEST_ATTRIB((r->match_tag &&
!pf_match_tag(ctx->pd->m, r, &ctx->tag)),
- TAILQ_NEXT(r, entries));
+ RB_NEXT(pf_ruletree, head, r));
PF_TEST_ATTRIB((r->rcv_kif && pf_match_rcvif(ctx->pd->m, r) ==
r->rcvifnot),
- TAILQ_NEXT(r, entries));
+ RB_NEXT(pf_ruletree, head, r));
PF_TEST_ATTRIB((r->prio &&
(r->prio == PF_PRIO_ZERO ? 0 : r->prio) !=
ctx->pd->m->m_pkthdr.pf.prio),
- TAILQ_NEXT(r, entries));
+ RB_NEXT(pf_ruletree, head, r));
/* must be last! */
if (r->pktrate.limit) {
pf_add_threshold(&r->pktrate);
PF_TEST_ATTRIB((pf_check_threshold(&r->pktrate)),
- TAILQ_NEXT(r, entries));
+ RB_NEXT(pf_ruletree, head, r));
}
/* FALLTHROUGH */
@@ -3804,7 +3805,7 @@ pf_match_rule(struct pf_test_ctx *ctx, struct pf_ruleset *ruleset)
ctx->a = save_a;
ctx->aruleset = save_aruleset;
}
- r = TAILQ_NEXT(r, entries);
+ r = RB_NEXT(pf_ruletree, head, r);
}
return (ctx->test_status);
diff --git sys/net/pf_ioctl.c sys/net/pf_ioctl.c
index b540afc6fe2..b4ef2dd15bb 100644
--- sys/net/pf_ioctl.c
+++ sys/net/pf_ioctl.c
@@ -218,7 +218,7 @@ pfattach(int num)
pf_queues_inactive = &pf_queues[1];
/* default rule should never be garbage collected */
- pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
+ pf_default_rule.usage = PF_USAGE_DEFAULT;
pf_default_rule.action = PF_PASS;
pf_default_rule.nr = (u_int32_t)-1;
pf_default_rule.rtableid = -1;
@@ -294,9 +294,9 @@ pf_rule_free(struct pf_rule *rule)
}
void
-pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
+pf_rm_rule(struct pf_ruletree *ruletree, struct pf_rule *rule)
{
- if (rulequeue != NULL) {
+ if (ruletree != NULL) {
if (rule->states_cur == 0 && rule->src_nodes == 0) {
/*
* XXX - we need to remove the table *before* detaching
@@ -311,13 +311,14 @@ pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
if (rule->overload_tbl)
pfr_detach_table(rule->overload_tbl);
}
- TAILQ_REMOVE(rulequeue, rule, entries);
- rule->entries.tqe_prev = NULL;
+ RB_REMOVE(pf_ruletree, ruletree, rule);
+ rule->usage = PF_USAGE_UNUSED;
rule->nr = (u_int32_t)-1;
}
+ ///XXX: here i am
if (rule->states_cur > 0 || rule->src_nodes > 0 ||
- rule->entries.tqe_prev != NULL)
+ rule->usage != PF_USAGE_UNUSED)
return;
pf_tag_unref(rule->tag);
pf_tag_unref(rule->match_tag);
@@ -328,7 +329,7 @@ pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
pfi_dynaddr_remove(&rule->rdr.addr);
pfi_dynaddr_remove(&rule->nat.addr);
pfi_dynaddr_remove(&rule->route.addr);
- if (rulequeue == NULL) {
+ if (ruletree == NULL) {
pf_tbladdr_remove(&rule->src.addr);
pf_tbladdr_remove(&rule->dst.addr);
pf_tbladdr_remove(&rule->rdr.addr);
@@ -357,7 +358,7 @@ pf_purge_rule(struct pf_rule *rule)
pf_rm_rule(ruleset->rules.active.ptr, rule);
ruleset->rules.active.rcount--;
- TAILQ_FOREACH(rule, ruleset->rules.active.ptr, entries)
+ RB_FOREACH(rule, pf_ruletree, ruleset->rules.active.ptr)
rule->nr = nr++;
ruleset->rules.active.ticket++;
pf_calc_skip_steps(ruleset->rules.active.ptr);
@@ -532,7 +533,7 @@ pf_begin_rules(u_int32_t *ticket, const char *anchor)
if ((rs = pf_find_or_create_ruleset(anchor)) == NULL)
return (EINVAL);
- while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
+ while ((rule = RB_MIN(pf_ruletree, rs->rules.inactive.ptr)) != NULL) {
pf_rm_rule(rs->rules.inactive.ptr, rule);
rs->rules.inactive.rcount--;
}
@@ -551,7 +552,7 @@ pf_rollback_rules(u_int32_t ticket, char *anchor)
if (rs == NULL || !rs->rules.inactive.open ||
rs->rules.inactive.ticket != ticket)
return;
- while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
+ while ((rule = RB_MIN(pf_ruletree, rs->rules.inactive.ptr)) != NULL) {
pf_rm_rule(rs->rules.inactive.ptr, rule);
rs->rules.inactive.rcount--;
}
@@ -832,7 +833,7 @@ pf_commit_rules(u_int32_t ticket, char *anchor)
{
struct pf_ruleset *rs;
struct pf_rule *rule;
- struct pf_rulequeue *old_rules;
+ struct pf_ruletree *old_rules;
u_int32_t old_rcount;
/* Make sure any expired rules get removed from active rules first. */
@@ -860,7 +861,7 @@ pf_commit_rules(u_int32_t ticket, char *anchor)
/* Purge the old rule list. */
- while ((rule = TAILQ_FIRST(old_rules)) != NULL)
+ while ((rule = RB_MIN(pf_ruletree, old_rules)) != NULL)
pf_rm_rule(old_rules, rule);
rs->rules.inactive.rcount = 0;
rs->rules.inactive.open = 0;
@@ -882,9 +883,8 @@ pf_calc_chksum(struct pf_ruleset *rs)
MD5Init(&ctx);
if (rs->rules.inactive.rcount) {
- TAILQ_FOREACH(rule, rs->rules.inactive.ptr, entries) {
+ RB_FOREACH(rule, pf_ruletree, rs->rules.inactive.ptr)
pf_hash_rule(&ctx, rule);
- }
}
MD5Final(digest, &ctx);
@@ -1341,7 +1341,7 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
case DIOCADDRULE: {
struct pfioc_rule *pr = (struct pfioc_rule *)addr;
struct pf_ruleset *ruleset;
- struct pf_rule *rule, *tail;
+ struct pf_rule *rule, *last;
rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
if (rule == NULL) {
@@ -1402,10 +1402,9 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
rule->cuid = p->p_ucred->cr_ruid;
rule->cpid = p->p_p->ps_pid;
- tail = TAILQ_LAST(ruleset->rules.inactive.ptr,
- pf_rulequeue);
- if (tail)
- rule->nr = tail->nr + 1;
+ last = RB_MAX(pf_ruletree, ruleset->rules.inactive.ptr);
+ if (last)
+ rule->nr = last->nr + 1;
else
rule->nr = 0;
@@ -1442,8 +1441,8 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
NET_UNLOCK();
goto fail;
}
- TAILQ_INSERT_TAIL(ruleset->rules.inactive.ptr,
- rule, entries);
+ RB_INSERT(pf_ruletree, ruleset->rules.inactive.ptr, rule);
+ rule->usage = PF_USAGE_USED;
rule->ruleset = ruleset;
ruleset->rules.inactive.rcount++;
PF_UNLOCK();
@@ -1454,7 +1453,7 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
case DIOCGETRULES: {
struct pfioc_rule *pr = (struct pfioc_rule *)addr;
struct pf_ruleset *ruleset;
- struct pf_rule *tail;
+ struct pf_rule *last;
NET_LOCK();
PF_LOCK();
@@ -1466,9 +1465,9 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
NET_UNLOCK();
goto fail;
}
- tail = TAILQ_LAST(ruleset->rules.active.ptr, pf_rulequeue);
- if (tail)
- pr->nr = tail->nr + 1;
+ last = RB_MAX(pf_ruletree, ruleset->rules.active.ptr);
+ if (last)
+ pr->nr = last->nr + 1;
else
pr->nr = 0;
pr->ticket = ruleset->rules.active.ticket;
@@ -1499,9 +1498,8 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
NET_UNLOCK();
goto fail;
}
- rule = TAILQ_FIRST(ruleset->rules.active.ptr);
- while ((rule != NULL) && (rule->nr != pr->nr))
- rule = TAILQ_NEXT(rule, entries);
+ struct pf_rule find = { .nr = pr->nr };
+ rule = RB_FIND(pf_ruletree, ruleset->rules.active.ptr, &find);
if (rule == NULL) {
error = EBUSY;
PF_UNLOCK();
@@ -1676,14 +1674,17 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
}
if (pcr->action == PF_CHANGE_ADD_HEAD)
- oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
+ oldrule = RB_MIN(pf_ruletree,
+ ruleset->rules.active.ptr);
else if (pcr->action == PF_CHANGE_ADD_TAIL)
- oldrule = TAILQ_LAST(ruleset->rules.active.ptr,
- pf_rulequeue);
+ oldrule = RB_MAX(pf_ruletree,
+ ruleset->rules.active.ptr);
else {
- oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
+ oldrule = RB_MIN(pf_ruletree,
+ ruleset->rules.active.ptr);
while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
- oldrule = TAILQ_NEXT(oldrule, entries);
+ oldrule = RB_NEXT(pf_ruletree,
+ ruleset->rules.active.ptr, oldrule);
if (oldrule == NULL) {
if (newrule != NULL)
pf_rm_rule(NULL, newrule);
@@ -1698,23 +1699,14 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
pf_rm_rule(ruleset->rules.active.ptr, oldrule);
ruleset->rules.active.rcount--;
} else {
- if (oldrule == NULL)
- TAILQ_INSERT_TAIL(
- ruleset->rules.active.ptr,
- newrule, entries);
- else if (pcr->action == PF_CHANGE_ADD_HEAD ||
- pcr->action == PF_CHANGE_ADD_BEFORE)
- TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
- else
- TAILQ_INSERT_AFTER(
- ruleset->rules.active.ptr,
- oldrule, newrule, entries);
+ RB_INSERT(pf_ruletree,
+ ruleset->rules.active.ptr, newrule);
ruleset->rules.active.rcount++;
newrule->ruleset = ruleset;
}
nr = 0;
- TAILQ_FOREACH(oldrule, ruleset->rules.active.ptr, entries)
+ RB_FOREACH(oldrule, pf_ruletree, ruleset->rules.active.ptr)
oldrule->nr = nr++;
ruleset->rules.active.ticket++;
diff --git sys/net/pf_ruleset.c sys/net/pf_ruleset.c
index 6131895751e..adf5a73dcb8 100644
--- sys/net/pf_ruleset.c
+++ sys/net/pf_ruleset.c
@@ -87,8 +87,22 @@ struct pool pf_anchor_pl;
struct pf_anchor_global pf_anchors;
struct pf_anchor pf_main_anchor;
+static __inline int pf_rule_compare(struct pf_rule *, struct pf_rule *);
static __inline int pf_anchor_compare(struct pf_anchor *, struct pf_anchor *);
+RB_GENERATE(pf_ruletree, pf_rule, entries, pf_rule_compare);
+
+static __inline int
+pf_rule_compare(struct pf_rule *a, struct pf_rule *b)
+{
+ if (a->nr == b->nr)
+ return (0);
+ else if (a->nr > b->nr)
+ return (1);
+ else
+ return (-1);
+}
+
RB_GENERATE(pf_anchor_global, pf_anchor, entry_global, pf_anchor_compare);
RB_GENERATE(pf_anchor_node, pf_anchor, entry_node, pf_anchor_compare);
@@ -104,10 +118,10 @@ void
pf_init_ruleset(struct pf_ruleset *ruleset)
{
memset(ruleset, 0, sizeof(struct pf_ruleset));
- TAILQ_INIT(&ruleset->rules.queues[0]);
- TAILQ_INIT(&ruleset->rules.queues[1]);
- ruleset->rules.active.ptr = &ruleset->rules.queues[0];
- ruleset->rules.inactive.ptr = &ruleset->rules.queues[1];
+ RB_INIT(&ruleset->rules.trees[0]);
+ RB_INIT(&ruleset->rules.trees[1]);
+ ruleset->rules.active.ptr = &ruleset->rules.trees[0];
+ ruleset->rules.inactive.ptr = &ruleset->rules.trees[1];
}
struct pf_anchor *
@@ -300,8 +314,8 @@ pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
ruleset->anchor->refcnt > 0 || ruleset->tables > 0 ||
ruleset->topen)
return;
- if (!TAILQ_EMPTY(ruleset->rules.active.ptr) ||
- !TAILQ_EMPTY(ruleset->rules.inactive.ptr) ||
+ if (!RB_EMPTY(ruleset->rules.active.ptr) ||
+ !RB_EMPTY(ruleset->rules.inactive.ptr) ||
ruleset->rules.inactive.open)
return;
RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor);
diff --git sys/net/pfvar.h sys/net/pfvar.h
index 6f88ff687ed..4e3b877856c 100644
--- sys/net/pfvar.h
+++ sys/net/pfvar.h
@@ -506,7 +506,11 @@ struct pf_rule {
char overload_tblname[PF_TABLE_NAME_SIZE];
- TAILQ_ENTRY(pf_rule) entries;
+ RB_ENTRY(pf_rule) entries;
+#define PF_USAGE_UNUSED 0
+#define PF_USAGE_USED 1
+#define PF_USAGE_DEFAULT 2
+ uint8_t usage;
struct pf_pool nat;
struct pf_pool rdr;
struct pf_pool route;
@@ -918,15 +922,15 @@ struct pfsync_state {
d += ntohl(s[1]); \
} while (0)
-TAILQ_HEAD(pf_rulequeue, pf_rule);
+RB_HEAD(pf_ruletree, pf_rule);
struct pf_anchor;
struct pf_ruleset {
struct {
- struct pf_rulequeue queues[2];
+ struct pf_ruletree trees[2];
struct {
- struct pf_rulequeue *ptr;
+ struct pf_ruletree *ptr;
u_int32_t rcount;
u_int32_t ticket;
int open;
@@ -937,6 +941,7 @@ struct pf_ruleset {
int tables;
int topen;
};
+RB_PROTOTYPE(pf_ruletree, pf_rule, entries, pf_rule_compare)
RB_HEAD(pf_anchor_global, pf_anchor);
RB_HEAD(pf_anchor_node, pf_anchor);
@@ -1454,7 +1459,7 @@ struct pf_pktdelay {
#define PFFRAG_FRENT_HIWAT (NMBCLUSTERS / 16) /* Number of entries */
#define PFFRAG_FRAG_HIWAT (NMBCLUSTERS / 32) /* Number of packets */
-#define PFR_KTABLE_HIWAT 1000 /* Number of tables */
+#define PFR_KTABLE_HIWAT 50000 /* Number of tables */
#define PFR_KENTRY_HIWAT 200000 /* Number of table entries */
#define PFR_KENTRY_HIWAT_SMALL 100000 /* Number of entries for tiny hosts */
@@ -1713,7 +1718,7 @@ extern int
pf_tbladdr_setup(struct pf_ruleset *,
struct pf_addr_wrap *, int);
extern void pf_tbladdr_remove(struct pf_addr_wrap *);
extern void pf_tbladdr_copyout(struct pf_addr_wrap *);
-extern void pf_calc_skip_steps(struct pf_rulequeue *);
+extern void pf_calc_skip_steps(struct pf_ruletree *);
extern void pf_purge_expired_src_nodes(void);
extern void pf_purge_expired_states(u_int32_t);
extern void pf_purge_expired_rules(void);
@@ -1744,7 +1749,7 @@ extern void pf_print_state(struct
pf_state *);
extern void pf_print_flags(u_int8_t);
extern void pf_addrcpy(struct pf_addr *, struct pf_addr *,
sa_family_t);
-void pf_rm_rule(struct pf_rulequeue *,
+void pf_rm_rule(struct pf_ruletree *,
struct pf_rule *);
void pf_purge_rule(struct pf_rule *);
struct pf_divert *pf_find_divert(struct mbuf *);
Best Regards,
Stefan Butz
smime.p7s
Description: S/MIME cryptographic signature
